From 4633b3aa177f52b7ab34f5815a471f7ed9c266dc Mon Sep 17 00:00:00 2001
From: Thomas Wodarek
Date: Tue, 3 Oct 2023 23:44:10 -0400
Subject: [PATCH 1/6] Bump go version and deps
---
go.mod | 27 +-
go.sum | 91 +-
.../github.com/go-resty/resty/v2/.gitignore | 30 +
.../github.com/go-resty/resty/v2/BUILD.bazel | 51 +
vendor/github.com/go-resty/resty/v2/LICENSE | 21 +
vendor/github.com/go-resty/resty/v2/README.md | 925 +++++++++++
vendor/github.com/go-resty/resty/v2/WORKSPACE | 31 +
vendor/github.com/go-resty/resty/v2/client.go | 1391 ++++++++++++++++
vendor/github.com/go-resty/resty/v2/digest.go | 281 ++++
.../go-resty/resty/v2/middleware.go | 567 +++++++
.../github.com/go-resty/resty/v2/redirect.go | 109 ++
.../github.com/go-resty/resty/v2/request.go | 1093 +++++++++++++
.../github.com/go-resty/resty/v2/response.go | 177 ++
vendor/github.com/go-resty/resty/v2/resty.go | 40 +
vendor/github.com/go-resty/resty/v2/retry.go | 252 +++
vendor/github.com/go-resty/resty/v2/trace.go | 130 ++
.../github.com/go-resty/resty/v2/transport.go | 36 +
.../go-resty/resty/v2/transport112.go | 35 +
.../go-resty/resty/v2/transport_js.go | 17 +
.../go-resty/resty/v2/transport_other.go | 17 +
vendor/github.com/go-resty/resty/v2/util.go | 402 +++++
.../github.com/go-sql-driver/mysql/.gitignore | 9 +
vendor/github.com/go-sql-driver/mysql/AUTHORS | 126 ++
.../go-sql-driver/mysql/CHANGELOG.md | 266 +++
vendor/github.com/go-sql-driver/mysql/LICENSE | 373 +++++
.../github.com/go-sql-driver/mysql/README.md | 531 ++++++
.../go-sql-driver/mysql/atomic_bool.go | 19 +
.../go-sql-driver/mysql/atomic_bool_go118.go | 47 +
vendor/github.com/go-sql-driver/mysql/auth.go | 437 +++++
.../github.com/go-sql-driver/mysql/buffer.go | 182 +++
.../go-sql-driver/mysql/collations.go | 266 +++
.../go-sql-driver/mysql/conncheck.go | 55 +
.../go-sql-driver/mysql/conncheck_dummy.go | 18 +
.../go-sql-driver/mysql/connection.go | 650 ++++++++
.../go-sql-driver/mysql/connector.go | 146 ++
.../github.com/go-sql-driver/mysql/const.go | 174 ++
.../github.com/go-sql-driver/mysql/driver.go | 107 ++
vendor/github.com/go-sql-driver/mysql/dsn.go | 577 +++++++
.../github.com/go-sql-driver/mysql/errors.go | 77 +
.../github.com/go-sql-driver/mysql/fields.go | 206 +++
vendor/github.com/go-sql-driver/mysql/fuzz.go | 25 +
.../github.com/go-sql-driver/mysql/infile.go | 182 +++
.../go-sql-driver/mysql/nulltime.go | 71 +
.../github.com/go-sql-driver/mysql/packets.go | 1349 ++++++++++++++++
.../github.com/go-sql-driver/mysql/result.go | 22 +
vendor/github.com/go-sql-driver/mysql/rows.go | 223 +++
.../go-sql-driver/mysql/statement.go | 220 +++
.../go-sql-driver/mysql/transaction.go | 31 +
.../github.com/go-sql-driver/mysql/utils.go | 834 ++++++++++
.../github.com/google/go-querystring/LICENSE | 27 +
.../google/go-querystring/query/encode.go | 357 +++++
vendor/github.com/gorilla/mux/AUTHORS | 8 +
vendor/github.com/gorilla/mux/LICENSE | 27 +
vendor/github.com/gorilla/mux/README.md | 805 ++++++++++
vendor/github.com/gorilla/mux/doc.go | 306 ++++
vendor/github.com/gorilla/mux/middleware.go | 74 +
vendor/github.com/gorilla/mux/mux.go | 606 +++++++
vendor/github.com/gorilla/mux/regexp.go | 388 +++++
vendor/github.com/gorilla/mux/route.go | 736 +++++++++
vendor/github.com/gorilla/mux/test_helpers.go | 19 +
.../github.com/gorilla/websocket/.gitignore | 25 +
vendor/github.com/gorilla/websocket/AUTHORS | 9 +
vendor/github.com/gorilla/websocket/LICENSE | 22 +
vendor/github.com/gorilla/websocket/README.md | 39 +
vendor/github.com/gorilla/websocket/client.go | 422 +++++
.../gorilla/websocket/compression.go | 148 ++
vendor/github.com/gorilla/websocket/conn.go | 1230 ++++++++++++++
vendor/github.com/gorilla/websocket/doc.go | 227 +++
vendor/github.com/gorilla/websocket/join.go | 42 +
vendor/github.com/gorilla/websocket/json.go | 60 +
vendor/github.com/gorilla/websocket/mask.go | 55 +
.../github.com/gorilla/websocket/mask_safe.go | 16 +
.../github.com/gorilla/websocket/prepared.go | 102 ++
vendor/github.com/gorilla/websocket/proxy.go | 77 +
vendor/github.com/gorilla/websocket/server.go | 365 +++++
.../gorilla/websocket/tls_handshake.go | 21 +
.../gorilla/websocket/tls_handshake_116.go | 21 +
vendor/github.com/gorilla/websocket/util.go | 283 ++++
.../gorilla/websocket/x_net_proxy.go | 473 ++++++
vendor/github.com/jinzhu/gorm/.gitignore | 3 +
vendor/github.com/jinzhu/gorm/License | 21 +
vendor/github.com/jinzhu/gorm/README.md | 5 +
vendor/github.com/jinzhu/gorm/association.go | 377 +++++
vendor/github.com/jinzhu/gorm/callback.go | 250 +++
.../github.com/jinzhu/gorm/callback_create.go | 197 +++
.../github.com/jinzhu/gorm/callback_delete.go | 63 +
.../github.com/jinzhu/gorm/callback_query.go | 109 ++
.../jinzhu/gorm/callback_query_preload.go | 410 +++++
.../jinzhu/gorm/callback_row_query.go | 41 +
.../github.com/jinzhu/gorm/callback_save.go | 170 ++
.../github.com/jinzhu/gorm/callback_update.go | 121 ++
vendor/github.com/jinzhu/gorm/dialect.go | 147 ++
.../github.com/jinzhu/gorm/dialect_common.go | 196 +++
.../github.com/jinzhu/gorm/dialect_mysql.go | 246 +++
.../jinzhu/gorm/dialect_postgres.go | 147 ++
.../github.com/jinzhu/gorm/dialect_sqlite3.go | 107 ++
.../github.com/jinzhu/gorm/docker-compose.yml | 30 +
vendor/github.com/jinzhu/gorm/errors.go | 72 +
vendor/github.com/jinzhu/gorm/field.go | 66 +
vendor/github.com/jinzhu/gorm/interface.go | 24 +
.../jinzhu/gorm/join_table_handler.go | 211 +++
vendor/github.com/jinzhu/gorm/logger.go | 141 ++
vendor/github.com/jinzhu/gorm/main.go | 886 ++++++++++
vendor/github.com/jinzhu/gorm/model.go | 14 +
vendor/github.com/jinzhu/gorm/model_struct.go | 677 ++++++++
vendor/github.com/jinzhu/gorm/naming.go | 124 ++
vendor/github.com/jinzhu/gorm/scope.go | 1425 +++++++++++++++++
vendor/github.com/jinzhu/gorm/search.go | 203 +++
vendor/github.com/jinzhu/gorm/test_all.sh | 5 +
vendor/github.com/jinzhu/gorm/utils.go | 226 +++
vendor/github.com/jinzhu/gorm/wercker.yml | 149 ++
vendor/github.com/jinzhu/inflection/LICENSE | 21 +
vendor/github.com/jinzhu/inflection/README.md | 55 +
.../jinzhu/inflection/inflections.go | 273 ++++
.../github.com/jinzhu/inflection/wercker.yml | 23 +
vendor/github.com/peterhellberg/link/LICENSE | 20 +
.../github.com/peterhellberg/link/README.md | 82 +
vendor/github.com/peterhellberg/link/doc.go | 38 +
vendor/github.com/peterhellberg/link/link.go | 120 ++
vendor/github.com/slack-go/slack/.gitignore | 3 +
.../github.com/slack-go/slack/.golangci.yml | 14 +
vendor/github.com/slack-go/slack/CHANGELOG.md | 103 ++
vendor/github.com/slack-go/slack/LICENSE | 23 +
vendor/github.com/slack-go/slack/Makefile | 36 +
vendor/github.com/slack-go/slack/README.md | 111 ++
vendor/github.com/slack-go/slack/TODO.txt | 3 +
vendor/github.com/slack-go/slack/admin.go | 207 +++
vendor/github.com/slack-go/slack/apps.go | 64 +
.../github.com/slack-go/slack/attachments.go | 98 ++
vendor/github.com/slack-go/slack/audit.go | 152 ++
vendor/github.com/slack-go/slack/auth.go | 74 +
vendor/github.com/slack-go/slack/block.go | 82 +
.../github.com/slack-go/slack/block_action.go | 26 +
.../slack-go/slack/block_context.go | 32 +
.../github.com/slack-go/slack/block_conv.go | 437 +++++
.../slack-go/slack/block_divider.go | 22 +
.../slack-go/slack/block_element.go | 593 +++++++
.../github.com/slack-go/slack/block_file.go | 26 +
.../github.com/slack-go/slack/block_header.go | 38 +
.../github.com/slack-go/slack/block_image.go | 28 +
.../github.com/slack-go/slack/block_input.go | 30 +
.../github.com/slack-go/slack/block_object.go | 248 +++
.../slack-go/slack/block_rich_text.go | 383 +++++
.../slack-go/slack/block_section.go | 42 +
.../slack-go/slack/block_unknown.go | 13 +
vendor/github.com/slack-go/slack/bookmarks.go | 159 ++
vendor/github.com/slack-go/slack/bots.go | 58 +
vendor/github.com/slack-go/slack/channels.go | 36 +
vendor/github.com/slack-go/slack/chat.go | 876 ++++++++++
vendor/github.com/slack-go/slack/comment.go | 10 +
.../github.com/slack-go/slack/conversation.go | 739 +++++++++
vendor/github.com/slack-go/slack/dialog.go | 120 ++
.../slack-go/slack/dialog_select.go | 115 ++
.../github.com/slack-go/slack/dialog_text.go | 59 +
vendor/github.com/slack-go/slack/dnd.go | 151 ++
vendor/github.com/slack-go/slack/emoji.go | 35 +
vendor/github.com/slack-go/slack/errors.go | 21 +
vendor/github.com/slack-go/slack/files.go | 597 +++++++
vendor/github.com/slack-go/slack/groups.go | 7 +
vendor/github.com/slack-go/slack/history.go | 37 +
vendor/github.com/slack-go/slack/im.go | 21 +
vendor/github.com/slack-go/slack/info.go | 476 ++++++
.../github.com/slack-go/slack/interactions.go | 238 +++
.../slack/internal/backoff/backoff.go | 62 +
.../slack/internal/errorsx/errorsx.go | 17 +
.../slack-go/slack/internal/timex/timex.go | 18 +
vendor/github.com/slack-go/slack/item.go | 75 +
vendor/github.com/slack-go/slack/logger.go | 60 +
vendor/github.com/slack-go/slack/logo.png | Bin 0 -> 52440 bytes
vendor/github.com/slack-go/slack/manifests.go | 287 ++++
vendor/github.com/slack-go/slack/messageID.go | 30 +
vendor/github.com/slack-go/slack/messages.go | 259 +++
vendor/github.com/slack-go/slack/metadata.go | 7 +
vendor/github.com/slack-go/slack/misc.go | 349 ++++
vendor/github.com/slack-go/slack/oauth.go | 158 ++
.../github.com/slack-go/slack/pagination.go | 20 +
vendor/github.com/slack-go/slack/pins.go | 94 ++
vendor/github.com/slack-go/slack/reactions.go | 270 ++++
vendor/github.com/slack-go/slack/reminders.go | 132 ++
.../github.com/slack-go/slack/remotefiles.go | 316 ++++
vendor/github.com/slack-go/slack/rtm.go | 131 ++
vendor/github.com/slack-go/slack/search.go | 156 ++
vendor/github.com/slack-go/slack/security.go | 108 ++
vendor/github.com/slack-go/slack/slack.go | 174 ++
.../slack/slackevents/action_events.go | 36 +
.../slack/slackevents/inner_events.go | 673 ++++++++
.../slack/slackevents/outer_events.go | 72 +
.../slack-go/slack/slackevents/parsers.go | 258 +++
.../slack-go/slack/slackutilsx/slackutilsx.go | 64 +
vendor/github.com/slack-go/slack/slash.go | 55 +
.../github.com/slack-go/slack/socket_mode.go | 34 +
vendor/github.com/slack-go/slack/stars.go | 264 +++
.../slack-go/slack/status_code_error.go | 28 +
vendor/github.com/slack-go/slack/team.go | 236 +++
vendor/github.com/slack-go/slack/tokens.go | 50 +
.../github.com/slack-go/slack/usergroups.go | 303 ++++
vendor/github.com/slack-go/slack/users.go | 738 +++++++++
vendor/github.com/slack-go/slack/views.go | 306 ++++
vendor/github.com/slack-go/slack/webhooks.go | 63 +
vendor/github.com/slack-go/slack/websocket.go | 103 ++
.../slack-go/slack/websocket_channels.go | 72 +
.../slack/websocket_desktop_notification.go | 19 +
.../github.com/slack-go/slack/websocket_dm.go | 23 +
.../slack-go/slack/websocket_dnd.go | 8 +
.../slack-go/slack/websocket_files.go | 49 +
.../slack-go/slack/websocket_groups.go | 49 +
.../slack-go/slack/websocket_internals.go | 102 ++
.../slack-go/slack/websocket_managed_conn.go | 611 +++++++
.../slack-go/slack/websocket_misc.go | 141 ++
.../websocket_mobile_in_app_notification.go | 20 +
.../slack-go/slack/websocket_pins.go | 16 +
.../slack-go/slack/websocket_reactions.go | 25 +
.../slack-go/slack/websocket_stars.go | 14 +
.../slack-go/slack/websocket_subteam.go | 35 +
.../slack-go/slack/websocket_teams.go | 33 +
.../slack-go/slack/workflow_step.go | 98 ++
.../slack-go/slack/workflow_step_execute.go | 85 +
.../twodarek/go-cisco-webex-teams/LICENSE | 21 +
.../go-cisco-webex-teams/sdk/api_client.go | 92 ++
.../go-cisco-webex-teams/sdk/contents_api.go | 33 +
.../go-cisco-webex-teams/sdk/devices.go | 245 +++
.../go-cisco-webex-teams/sdk/licenses_api.go | 142 ++
.../go-cisco-webex-teams/sdk/meetings_api.go | 243 +++
.../sdk/memberships_api.go | 247 +++
.../go-cisco-webex-teams/sdk/messages_api.go | 221 +++
.../sdk/organizations_api.go | 140 ++
.../go-cisco-webex-teams/sdk/people_api.go | 292 ++++
.../go-cisco-webex-teams/sdk/places_api.go | 245 +++
.../go-cisco-webex-teams/sdk/recordings.go | 198 +++
.../go-cisco-webex-teams/sdk/roles_api.go | 138 ++
.../go-cisco-webex-teams/sdk/rooms_api.go | 247 +++
.../sdk/team_memberships_api.go | 237 +++
.../go-cisco-webex-teams/sdk/teams_api.go | 229 +++
.../go-cisco-webex-teams/sdk/webhooks_api.go | 270 ++++
vendor/golang.org/x/net/LICENSE | 27 +
vendor/golang.org/x/net/PATENTS | 22 +
.../x/net/publicsuffix/data/children | Bin 0 -> 2976 bytes
.../golang.org/x/net/publicsuffix/data/nodes | Bin 0 -> 46610 bytes
.../golang.org/x/net/publicsuffix/data/text | 1 +
vendor/golang.org/x/net/publicsuffix/list.go | 203 +++
vendor/golang.org/x/net/publicsuffix/table.go | 70 +
vendor/modules.txt | 42 +
242 files changed, 45050 insertions(+), 35 deletions(-)
create mode 100644 vendor/github.com/go-resty/resty/v2/.gitignore
create mode 100644 vendor/github.com/go-resty/resty/v2/BUILD.bazel
create mode 100644 vendor/github.com/go-resty/resty/v2/LICENSE
create mode 100644 vendor/github.com/go-resty/resty/v2/README.md
create mode 100644 vendor/github.com/go-resty/resty/v2/WORKSPACE
create mode 100644 vendor/github.com/go-resty/resty/v2/client.go
create mode 100644 vendor/github.com/go-resty/resty/v2/digest.go
create mode 100644 vendor/github.com/go-resty/resty/v2/middleware.go
create mode 100644 vendor/github.com/go-resty/resty/v2/redirect.go
create mode 100644 vendor/github.com/go-resty/resty/v2/request.go
create mode 100644 vendor/github.com/go-resty/resty/v2/response.go
create mode 100644 vendor/github.com/go-resty/resty/v2/resty.go
create mode 100644 vendor/github.com/go-resty/resty/v2/retry.go
create mode 100644 vendor/github.com/go-resty/resty/v2/trace.go
create mode 100644 vendor/github.com/go-resty/resty/v2/transport.go
create mode 100644 vendor/github.com/go-resty/resty/v2/transport112.go
create mode 100644 vendor/github.com/go-resty/resty/v2/transport_js.go
create mode 100644 vendor/github.com/go-resty/resty/v2/transport_other.go
create mode 100644 vendor/github.com/go-resty/resty/v2/util.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/.gitignore
create mode 100644 vendor/github.com/go-sql-driver/mysql/AUTHORS
create mode 100644 vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
create mode 100644 vendor/github.com/go-sql-driver/mysql/LICENSE
create mode 100644 vendor/github.com/go-sql-driver/mysql/README.md
create mode 100644 vendor/github.com/go-sql-driver/mysql/atomic_bool.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/auth.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/buffer.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/collations.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/connection.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/connector.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/const.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/driver.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/dsn.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/errors.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/fields.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/fuzz.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/infile.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/packets.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/result.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/rows.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/statement.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/transaction.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/utils.go
create mode 100644 vendor/github.com/google/go-querystring/LICENSE
create mode 100644 vendor/github.com/google/go-querystring/query/encode.go
create mode 100644 vendor/github.com/gorilla/mux/AUTHORS
create mode 100644 vendor/github.com/gorilla/mux/LICENSE
create mode 100644 vendor/github.com/gorilla/mux/README.md
create mode 100644 vendor/github.com/gorilla/mux/doc.go
create mode 100644 vendor/github.com/gorilla/mux/middleware.go
create mode 100644 vendor/github.com/gorilla/mux/mux.go
create mode 100644 vendor/github.com/gorilla/mux/regexp.go
create mode 100644 vendor/github.com/gorilla/mux/route.go
create mode 100644 vendor/github.com/gorilla/mux/test_helpers.go
create mode 100644 vendor/github.com/gorilla/websocket/.gitignore
create mode 100644 vendor/github.com/gorilla/websocket/AUTHORS
create mode 100644 vendor/github.com/gorilla/websocket/LICENSE
create mode 100644 vendor/github.com/gorilla/websocket/README.md
create mode 100644 vendor/github.com/gorilla/websocket/client.go
create mode 100644 vendor/github.com/gorilla/websocket/compression.go
create mode 100644 vendor/github.com/gorilla/websocket/conn.go
create mode 100644 vendor/github.com/gorilla/websocket/doc.go
create mode 100644 vendor/github.com/gorilla/websocket/join.go
create mode 100644 vendor/github.com/gorilla/websocket/json.go
create mode 100644 vendor/github.com/gorilla/websocket/mask.go
create mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go
create mode 100644 vendor/github.com/gorilla/websocket/prepared.go
create mode 100644 vendor/github.com/gorilla/websocket/proxy.go
create mode 100644 vendor/github.com/gorilla/websocket/server.go
create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake.go
create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake_116.go
create mode 100644 vendor/github.com/gorilla/websocket/util.go
create mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go
create mode 100644 vendor/github.com/jinzhu/gorm/.gitignore
create mode 100644 vendor/github.com/jinzhu/gorm/License
create mode 100644 vendor/github.com/jinzhu/gorm/README.md
create mode 100644 vendor/github.com/jinzhu/gorm/association.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_create.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_delete.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_query.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_query_preload.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_row_query.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_save.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_update.go
create mode 100644 vendor/github.com/jinzhu/gorm/dialect.go
create mode 100644 vendor/github.com/jinzhu/gorm/dialect_common.go
create mode 100644 vendor/github.com/jinzhu/gorm/dialect_mysql.go
create mode 100644 vendor/github.com/jinzhu/gorm/dialect_postgres.go
create mode 100644 vendor/github.com/jinzhu/gorm/dialect_sqlite3.go
create mode 100644 vendor/github.com/jinzhu/gorm/docker-compose.yml
create mode 100644 vendor/github.com/jinzhu/gorm/errors.go
create mode 100644 vendor/github.com/jinzhu/gorm/field.go
create mode 100644 vendor/github.com/jinzhu/gorm/interface.go
create mode 100644 vendor/github.com/jinzhu/gorm/join_table_handler.go
create mode 100644 vendor/github.com/jinzhu/gorm/logger.go
create mode 100644 vendor/github.com/jinzhu/gorm/main.go
create mode 100644 vendor/github.com/jinzhu/gorm/model.go
create mode 100644 vendor/github.com/jinzhu/gorm/model_struct.go
create mode 100644 vendor/github.com/jinzhu/gorm/naming.go
create mode 100644 vendor/github.com/jinzhu/gorm/scope.go
create mode 100644 vendor/github.com/jinzhu/gorm/search.go
create mode 100644 vendor/github.com/jinzhu/gorm/test_all.sh
create mode 100644 vendor/github.com/jinzhu/gorm/utils.go
create mode 100644 vendor/github.com/jinzhu/gorm/wercker.yml
create mode 100644 vendor/github.com/jinzhu/inflection/LICENSE
create mode 100644 vendor/github.com/jinzhu/inflection/README.md
create mode 100644 vendor/github.com/jinzhu/inflection/inflections.go
create mode 100644 vendor/github.com/jinzhu/inflection/wercker.yml
create mode 100644 vendor/github.com/peterhellberg/link/LICENSE
create mode 100644 vendor/github.com/peterhellberg/link/README.md
create mode 100644 vendor/github.com/peterhellberg/link/doc.go
create mode 100644 vendor/github.com/peterhellberg/link/link.go
create mode 100644 vendor/github.com/slack-go/slack/.gitignore
create mode 100644 vendor/github.com/slack-go/slack/.golangci.yml
create mode 100644 vendor/github.com/slack-go/slack/CHANGELOG.md
create mode 100644 vendor/github.com/slack-go/slack/LICENSE
create mode 100644 vendor/github.com/slack-go/slack/Makefile
create mode 100644 vendor/github.com/slack-go/slack/README.md
create mode 100644 vendor/github.com/slack-go/slack/TODO.txt
create mode 100644 vendor/github.com/slack-go/slack/admin.go
create mode 100644 vendor/github.com/slack-go/slack/apps.go
create mode 100644 vendor/github.com/slack-go/slack/attachments.go
create mode 100644 vendor/github.com/slack-go/slack/audit.go
create mode 100644 vendor/github.com/slack-go/slack/auth.go
create mode 100644 vendor/github.com/slack-go/slack/block.go
create mode 100644 vendor/github.com/slack-go/slack/block_action.go
create mode 100644 vendor/github.com/slack-go/slack/block_context.go
create mode 100644 vendor/github.com/slack-go/slack/block_conv.go
create mode 100644 vendor/github.com/slack-go/slack/block_divider.go
create mode 100644 vendor/github.com/slack-go/slack/block_element.go
create mode 100644 vendor/github.com/slack-go/slack/block_file.go
create mode 100644 vendor/github.com/slack-go/slack/block_header.go
create mode 100644 vendor/github.com/slack-go/slack/block_image.go
create mode 100644 vendor/github.com/slack-go/slack/block_input.go
create mode 100644 vendor/github.com/slack-go/slack/block_object.go
create mode 100644 vendor/github.com/slack-go/slack/block_rich_text.go
create mode 100644 vendor/github.com/slack-go/slack/block_section.go
create mode 100644 vendor/github.com/slack-go/slack/block_unknown.go
create mode 100644 vendor/github.com/slack-go/slack/bookmarks.go
create mode 100644 vendor/github.com/slack-go/slack/bots.go
create mode 100644 vendor/github.com/slack-go/slack/channels.go
create mode 100644 vendor/github.com/slack-go/slack/chat.go
create mode 100644 vendor/github.com/slack-go/slack/comment.go
create mode 100644 vendor/github.com/slack-go/slack/conversation.go
create mode 100644 vendor/github.com/slack-go/slack/dialog.go
create mode 100644 vendor/github.com/slack-go/slack/dialog_select.go
create mode 100644 vendor/github.com/slack-go/slack/dialog_text.go
create mode 100644 vendor/github.com/slack-go/slack/dnd.go
create mode 100644 vendor/github.com/slack-go/slack/emoji.go
create mode 100644 vendor/github.com/slack-go/slack/errors.go
create mode 100644 vendor/github.com/slack-go/slack/files.go
create mode 100644 vendor/github.com/slack-go/slack/groups.go
create mode 100644 vendor/github.com/slack-go/slack/history.go
create mode 100644 vendor/github.com/slack-go/slack/im.go
create mode 100644 vendor/github.com/slack-go/slack/info.go
create mode 100644 vendor/github.com/slack-go/slack/interactions.go
create mode 100644 vendor/github.com/slack-go/slack/internal/backoff/backoff.go
create mode 100644 vendor/github.com/slack-go/slack/internal/errorsx/errorsx.go
create mode 100644 vendor/github.com/slack-go/slack/internal/timex/timex.go
create mode 100644 vendor/github.com/slack-go/slack/item.go
create mode 100644 vendor/github.com/slack-go/slack/logger.go
create mode 100644 vendor/github.com/slack-go/slack/logo.png
create mode 100644 vendor/github.com/slack-go/slack/manifests.go
create mode 100644 vendor/github.com/slack-go/slack/messageID.go
create mode 100644 vendor/github.com/slack-go/slack/messages.go
create mode 100644 vendor/github.com/slack-go/slack/metadata.go
create mode 100644 vendor/github.com/slack-go/slack/misc.go
create mode 100644 vendor/github.com/slack-go/slack/oauth.go
create mode 100644 vendor/github.com/slack-go/slack/pagination.go
create mode 100644 vendor/github.com/slack-go/slack/pins.go
create mode 100644 vendor/github.com/slack-go/slack/reactions.go
create mode 100644 vendor/github.com/slack-go/slack/reminders.go
create mode 100644 vendor/github.com/slack-go/slack/remotefiles.go
create mode 100644 vendor/github.com/slack-go/slack/rtm.go
create mode 100644 vendor/github.com/slack-go/slack/search.go
create mode 100644 vendor/github.com/slack-go/slack/security.go
create mode 100644 vendor/github.com/slack-go/slack/slack.go
create mode 100644 vendor/github.com/slack-go/slack/slackevents/action_events.go
create mode 100644 vendor/github.com/slack-go/slack/slackevents/inner_events.go
create mode 100644 vendor/github.com/slack-go/slack/slackevents/outer_events.go
create mode 100644 vendor/github.com/slack-go/slack/slackevents/parsers.go
create mode 100644 vendor/github.com/slack-go/slack/slackutilsx/slackutilsx.go
create mode 100644 vendor/github.com/slack-go/slack/slash.go
create mode 100644 vendor/github.com/slack-go/slack/socket_mode.go
create mode 100644 vendor/github.com/slack-go/slack/stars.go
create mode 100644 vendor/github.com/slack-go/slack/status_code_error.go
create mode 100644 vendor/github.com/slack-go/slack/team.go
create mode 100644 vendor/github.com/slack-go/slack/tokens.go
create mode 100644 vendor/github.com/slack-go/slack/usergroups.go
create mode 100644 vendor/github.com/slack-go/slack/users.go
create mode 100644 vendor/github.com/slack-go/slack/views.go
create mode 100644 vendor/github.com/slack-go/slack/webhooks.go
create mode 100644 vendor/github.com/slack-go/slack/websocket.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_channels.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_desktop_notification.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_dm.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_dnd.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_files.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_groups.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_internals.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_managed_conn.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_misc.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_mobile_in_app_notification.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_pins.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_reactions.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_stars.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_subteam.go
create mode 100644 vendor/github.com/slack-go/slack/websocket_teams.go
create mode 100644 vendor/github.com/slack-go/slack/workflow_step.go
create mode 100644 vendor/github.com/slack-go/slack/workflow_step_execute.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/LICENSE
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/api_client.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/contents_api.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/devices.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/licenses_api.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/meetings_api.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/memberships_api.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/messages_api.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/organizations_api.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/people_api.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/places_api.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/recordings.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/roles_api.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/rooms_api.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/team_memberships_api.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/teams_api.go
create mode 100644 vendor/github.com/twodarek/go-cisco-webex-teams/sdk/webhooks_api.go
create mode 100644 vendor/golang.org/x/net/LICENSE
create mode 100644 vendor/golang.org/x/net/PATENTS
create mode 100644 vendor/golang.org/x/net/publicsuffix/data/children
create mode 100644 vendor/golang.org/x/net/publicsuffix/data/nodes
create mode 100644 vendor/golang.org/x/net/publicsuffix/data/text
create mode 100644 vendor/golang.org/x/net/publicsuffix/list.go
create mode 100644 vendor/golang.org/x/net/publicsuffix/table.go
create mode 100644 vendor/modules.txt
diff --git a/go.mod b/go.mod
index a8fe5a2..22aa09f 100644
--- a/go.mod
+++ b/go.mod
@@ -1,19 +1,20 @@
module github.com/twodarek/barcampgr-teams-bot
-go 1.17
+go 1.21
require (
- github.com/go-resty/resty/v2 v2.0.0 // indirect
- github.com/go-sql-driver/mysql v1.4.1
- github.com/google/go-querystring v1.0.0 // indirect
- github.com/gorilla/mux v1.7.4
- github.com/gorilla/websocket v1.4.2 // indirect
- github.com/jinzhu/gorm v1.9.12
- github.com/jinzhu/inflection v1.0.0 // indirect
- github.com/peterhellberg/link v1.0.0 // indirect
- github.com/pkg/errors v0.8.0 // indirect
- github.com/slack-go/slack v0.7.2
+ github.com/go-sql-driver/mysql v1.7.1
+ github.com/gorilla/mux v1.8.0
+ github.com/jinzhu/gorm v1.9.16
+ github.com/slack-go/slack v0.12.3
github.com/twodarek/go-cisco-webex-teams v0.4.1-0.20200803230619-bf35b2bae4c8
- golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect
- google.golang.org/appengine v1.4.0 // indirect
+)
+
+require (
+ github.com/go-resty/resty/v2 v2.9.1 // indirect
+ github.com/google/go-querystring v1.1.0 // indirect
+ github.com/gorilla/websocket v1.5.0 // indirect
+ github.com/jinzhu/inflection v1.0.0 // indirect
+ github.com/peterhellberg/link v1.2.0 // indirect
+ golang.org/x/net v0.15.0 // indirect
)
diff --git a/go.sum b/go.sum
index 8f46c76..72a5ab8 100644
--- a/go.sum
+++ b/go.sum
@@ -1,56 +1,103 @@
+github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
+github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd h1:83Wprp6ROGeiHFAP8WJdI2RoxALQYgdllERc3N5N2DM=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
-github.com/go-resty/resty/v2 v2.0.0 h1:9Nq/U+V4xsoDnDa/iTrABDWUCuk3Ne92XFHPe6dKWUc=
github.com/go-resty/resty/v2 v2.0.0/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
-github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
-github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-resty/resty/v2 v2.9.1 h1:PIgGx4VrHvag0juCJ4dDv3MiFRlDmP0vicBucwf+gLM=
+github.com/go-resty/resty/v2 v2.9.1/go.mod h1:4/GYJVjh9nhkhGR6AUNW3XhpDYNUr+Uvy9gV/VGZIy4=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho=
github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
-github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/jinzhu/gorm v1.9.12 h1:Drgk1clyWT9t9ERbzHza6Mj/8FY/CqMyVzOiHviMo6Q=
-github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/jinzhu/gorm v1.9.16 h1:+IyIjPEABKRpsu/F8OvDPy9fyQlgsg2luMV2ZIH5i5o=
+github.com/jinzhu/gorm v1.9.16/go.mod h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.0.1 h1:HjfetcXq097iXP0uoPCdnM4Efp5/9MsM0/M+XOTeR3M=
github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw=
-github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/peterhellberg/link v1.0.0 h1:mUWkiegowUXEcmlb+ybF75Q/8D2Y0BjZtR8cxoKhaQo=
+github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA=
+github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
github.com/peterhellberg/link v1.0.0/go.mod h1:gtSlOT4jmkY8P47hbTc8PTgiDDWpdPbFYl75keYyBB8=
-github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/peterhellberg/link v1.2.0 h1:UA5pg3Gp/E0F2WdX7GERiNrPQrM1K6CVJUUWfHa4t6c=
+github.com/peterhellberg/link v1.2.0/go.mod h1:gYfAh+oJgQu2SrZHg5hROVRQe1ICoK0/HHJTcE0edxc=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/slack-go/slack v0.7.2 h1:oLy2a2YqrtoHSSxbjRhrtLDGbCKcZJwgbuQ826BWxaI=
-github.com/slack-go/slack v0.7.2/go.mod h1:FGqNzJBmxIsZURAxh2a8D21AnOVvvXZvGligs4npPUM=
+github.com/slack-go/slack v0.12.3 h1:92/dfFU8Q5XP6Wp5rr5/T5JHLM5c5Smtn53fhToAP88=
+github.com/slack-go/slack v0.12.3/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/twodarek/go-cisco-webex-teams v0.4.1-0.20200803230619-bf35b2bae4c8 h1:4Yeqivf1+ZQ2afWuNeCIW3UNherWQ1B1L9dGRKu90dU=
github.com/twodarek/go-cisco-webex-teams v0.4.1-0.20200803230619-bf35b2bae4c8/go.mod h1:2yhuo7zg8oKErbxVd6KBNFJFqOP2Ib58CHWU3ceUniQ=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd h1:GGJVjV8waZKRHrgwvtH66z9ZGVurTD1MT0n1Bb+q4aM=
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M=
+golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/go-resty/resty/v2/.gitignore b/vendor/github.com/go-resty/resty/v2/.gitignore
new file mode 100644
index 0000000..9e856bd
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/.gitignore
@@ -0,0 +1,30 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+coverage.out
+coverage.txt
+
+# Exclude intellij IDE folders
+.idea/*
diff --git a/vendor/github.com/go-resty/resty/v2/BUILD.bazel b/vendor/github.com/go-resty/resty/v2/BUILD.bazel
new file mode 100644
index 0000000..f461c29
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/BUILD.bazel
@@ -0,0 +1,51 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("@bazel_gazelle//:def.bzl", "gazelle")
+
+# gazelle:prefix github.com/go-resty/resty/v2
+# gazelle:go_naming_convention import_alias
+gazelle(name = "gazelle")
+
+go_library(
+ name = "resty",
+ srcs = [
+ "client.go",
+ "digest.go",
+ "middleware.go",
+ "redirect.go",
+ "request.go",
+ "response.go",
+ "resty.go",
+ "retry.go",
+ "trace.go",
+ "transport_js.go",
+ "transport_other.go",
+ "transport.go",
+ "transport112.go",
+ "util.go",
+ ],
+ importpath = "github.com/go-resty/resty/v2",
+ visibility = ["//visibility:public"],
+ deps = ["@org_golang_x_net//publicsuffix:go_default_library"],
+)
+
+go_test(
+ name = "resty_test",
+ srcs = [
+ "client_test.go",
+ "context_test.go",
+ "example_test.go",
+ "request_test.go",
+ "resty_test.go",
+ "retry_test.go",
+ "util_test.go",
+ ],
+ data = glob([".testdata/*"]),
+ embed = [":resty"],
+ deps = ["@org_golang_x_net//proxy:go_default_library"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":resty",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/go-resty/resty/v2/LICENSE b/vendor/github.com/go-resty/resty/v2/LICENSE
new file mode 100644
index 0000000..0c2d38a
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015-2023 Jeevanandam M., https://myjeeva.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-resty/resty/v2/README.md b/vendor/github.com/go-resty/resty/v2/README.md
new file mode 100644
index 0000000..294a288
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/README.md
@@ -0,0 +1,925 @@
+
+
+Resty
+Simple HTTP and REST client library for Go (inspired by Ruby rest-client)
+Features section describes in detail about Resty capabilities
+
+
+
+
+
+
+Resty Communication Channels
+
+
+
+## News
+
+ * v2.9.1 [released](https://github.com/go-resty/resty/releases/tag/v2.9.1) and tagged on Sep 30, 2023.
+ * v2.0.0 [released](https://github.com/go-resty/resty/releases/tag/v2.0.0) and tagged on Jul 16, 2019.
+ * v1.12.0 [released](https://github.com/go-resty/resty/releases/tag/v1.12.0) and tagged on Feb 27, 2019.
+ * v1.0 released and tagged on Sep 25, 2017. - Resty's first version was released on Sep 15, 2015 then it grew gradually as a very handy and helpful library. Its been a two years since first release. I'm very thankful to Resty users and its [contributors](https://github.com/go-resty/resty/graphs/contributors).
+
+## Features
+
+ * GET, POST, PUT, DELETE, HEAD, PATCH, OPTIONS, etc.
+ * Simple and chainable methods for settings and request
+ * [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Request) Body can be `string`, `[]byte`, `struct`, `map`, `slice` and `io.Reader` too
+ * Auto detects `Content-Type`
+ * Buffer less processing for `io.Reader`
+ * Native `*http.Request` instance may be accessed during middleware and request execution via `Request.RawRequest`
+ * Request Body can be read multiple times via `Request.RawRequest.GetBody()`
+ * [Response](https://pkg.go.dev/github.com/go-resty/resty/v2#Response) object gives you more possibility
+ * Access as `[]byte` array - `response.Body()` OR Access as `string` - `response.String()`
+ * Know your `response.Time()` and when we `response.ReceivedAt()`
+ * Automatic marshal and unmarshal for `JSON` and `XML` content type
+ * Default is `JSON`, if you supply `struct/map` without header `Content-Type`
+ * For auto-unmarshal, refer to -
+ - Success scenario [Request.SetResult()](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetResult) and [Response.Result()](https://pkg.go.dev/github.com/go-resty/resty/v2#Response.Result).
+ - Error scenario [Request.SetError()](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetError) and [Response.Error()](https://pkg.go.dev/github.com/go-resty/resty/v2#Response.Error).
+ - Supports [RFC7807](https://tools.ietf.org/html/rfc7807) - `application/problem+json` & `application/problem+xml`
+ * Resty provides an option to override [JSON Marshal/Unmarshal and XML Marshal/Unmarshal](#override-json--xml-marshalunmarshal)
+ * Easy to upload one or more file(s) via `multipart/form-data`
+ * Auto detects file content type
+ * Request URL [Path Params (aka URI Params)](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetPathParams)
+ * Backoff Retry Mechanism with retry condition function [reference](retry_test.go)
+ * Resty client HTTP & REST [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.OnBeforeRequest) and [Response](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.OnAfterResponse) middlewares
+ * `Request.SetContext` supported
+ * Authorization option of `BasicAuth` and `Bearer` token
+ * Set request `ContentLength` value for all request or particular request
+ * Custom [Root Certificates](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetRootCertificate) and Client [Certificates](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetCertificates)
+ * Download/Save HTTP response directly into File, like `curl -o` flag. See [SetOutputDirectory](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetOutputDirectory) & [SetOutput](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetOutput).
+ * Cookies for your request and CookieJar support
+ * SRV Record based request instead of Host URL
+ * Client settings like `Timeout`, `RedirectPolicy`, `Proxy`, `TLSClientConfig`, `Transport`, etc.
+ * Optionally allows GET request with payload, see [SetAllowGetMethodPayload](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetAllowGetMethodPayload)
+ * Supports registering external JSON library into resty, see [how to use](https://github.com/go-resty/resty/issues/76#issuecomment-314015250)
+ * Exposes Response reader without reading response (no auto-unmarshaling) if need be, see [how to use](https://github.com/go-resty/resty/issues/87#issuecomment-322100604)
+ * Option to specify expected `Content-Type` when response `Content-Type` header missing. Refer to [#92](https://github.com/go-resty/resty/issues/92)
+ * Resty design
+ * Have client level settings & options and also override at Request level if you want to
+ * Request and Response middleware
+ * Create Multiple clients if you want to `resty.New()`
+ * Supports `http.RoundTripper` implementation, see [SetTransport](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetTransport)
+ * goroutine concurrent safe
+ * Resty Client trace, see [Client.EnableTrace](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.EnableTrace) and [Request.EnableTrace](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.EnableTrace)
+ * Since v2.4.0, trace info contains a `RequestAttempt` value, and the `Request` object contains an `Attempt` attribute
+ * Debug mode - clean and informative logging presentation
+ * Gzip - Go does it automatically also resty has fallback handling too
+ * Works fine with `HTTP/2` and `HTTP/1.1`
+ * [Bazel support](#bazel-support)
+ * Easily mock Resty for testing, [for e.g.](#mocking-http-requests-using-httpmock-library)
+ * Well tested client library
+
+### Included Batteries
+
+ * Redirect Policies - see [how to use](#redirect-policy)
+ * NoRedirectPolicy
+ * FlexibleRedirectPolicy
+ * DomainCheckRedirectPolicy
+ * etc. [more info](redirect.go)
+ * Retry Mechanism [how to use](#retries)
+ * Backoff Retry
+ * Conditional Retry
+ * Since v2.6.0, Retry Hooks - [Client](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.AddRetryHook), [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.AddRetryHook)
+ * SRV Record based request instead of Host URL [how to use](resty_test.go#L1412)
+ * etc (upcoming - throw your idea's [here](https://github.com/go-resty/resty/issues)).
+
+
+#### Supported Go Versions
+
+Recommended to use `go1.16` and above.
+
+Initially Resty started supporting `go modules` since `v1.10.0` release.
+
+Starting Resty v2 and higher versions, it fully embraces [go modules](https://github.com/golang/go/wiki/Modules) package release. It requires a Go version capable of understanding `/vN` suffixed imports:
+
+- 1.9.7+
+- 1.10.3+
+- 1.11+
+
+
+## It might be beneficial for your project :smile:
+
+Resty author also published following projects for Go Community.
+
+ * [aah framework](https://aahframework.org) - A secure, flexible, rapid Go web framework.
+ * [THUMBAI](https://thumbai.app) - Go Mod Repository, Go Vanity Service and Simple Proxy Server.
+ * [go-model](https://github.com/jeevatkm/go-model) - Robust & Easy to use model mapper and utility methods for Go `struct`.
+
+
+## Installation
+
+```bash
+# Go Modules
+require github.com/go-resty/resty/v2 v2.7.0
+```
+
+## Usage
+
+The following samples will assist you to become as comfortable as possible with resty library.
+
+```go
+// Import resty into your code and refer it as `resty`.
+import "github.com/go-resty/resty/v2"
+```
+
+#### Simple GET
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ EnableTrace().
+ Get("https://httpbin.org/get")
+
+// Explore response object
+fmt.Println("Response Info:")
+fmt.Println(" Error :", err)
+fmt.Println(" Status Code:", resp.StatusCode())
+fmt.Println(" Status :", resp.Status())
+fmt.Println(" Proto :", resp.Proto())
+fmt.Println(" Time :", resp.Time())
+fmt.Println(" Received At:", resp.ReceivedAt())
+fmt.Println(" Body :\n", resp)
+fmt.Println()
+
+// Explore trace info
+fmt.Println("Request Trace Info:")
+ti := resp.Request.TraceInfo()
+fmt.Println(" DNSLookup :", ti.DNSLookup)
+fmt.Println(" ConnTime :", ti.ConnTime)
+fmt.Println(" TCPConnTime :", ti.TCPConnTime)
+fmt.Println(" TLSHandshake :", ti.TLSHandshake)
+fmt.Println(" ServerTime :", ti.ServerTime)
+fmt.Println(" ResponseTime :", ti.ResponseTime)
+fmt.Println(" TotalTime :", ti.TotalTime)
+fmt.Println(" IsConnReused :", ti.IsConnReused)
+fmt.Println(" IsConnWasIdle :", ti.IsConnWasIdle)
+fmt.Println(" ConnIdleTime :", ti.ConnIdleTime)
+fmt.Println(" RequestAttempt:", ti.RequestAttempt)
+fmt.Println(" RemoteAddr :", ti.RemoteAddr.String())
+
+/* Output
+Response Info:
+ Error :
+ Status Code: 200
+ Status : 200 OK
+ Proto : HTTP/2.0
+ Time : 457.034718ms
+ Received At: 2020-09-14 15:35:29.784681 -0700 PDT m=+0.458137045
+ Body :
+ {
+ "args": {},
+ "headers": {
+ "Accept-Encoding": "gzip",
+ "Host": "httpbin.org",
+ "User-Agent": "go-resty/2.4.0 (https://github.com/go-resty/resty)",
+ "X-Amzn-Trace-Id": "Root=1-5f5ff031-000ff6292204aa6898e4de49"
+ },
+ "origin": "0.0.0.0",
+ "url": "https://httpbin.org/get"
+ }
+
+Request Trace Info:
+ DNSLookup : 4.074657ms
+ ConnTime : 381.709936ms
+ TCPConnTime : 77.428048ms
+ TLSHandshake : 299.623597ms
+ ServerTime : 75.414703ms
+ ResponseTime : 79.337µs
+ TotalTime : 457.034718ms
+ IsConnReused : false
+ IsConnWasIdle : false
+ ConnIdleTime : 0s
+ RequestAttempt: 1
+ RemoteAddr : 3.221.81.55:443
+*/
+```
+
+#### Enhanced GET
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ SetQueryParams(map[string]string{
+ "page_no": "1",
+ "limit": "20",
+ "sort":"name",
+ "order": "asc",
+ "random":strconv.FormatInt(time.Now().Unix(), 10),
+ }).
+ SetHeader("Accept", "application/json").
+ SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
+ Get("/search_result")
+
+
+// Sample of using Request.SetQueryString method
+resp, err := client.R().
+ SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more").
+ SetHeader("Accept", "application/json").
+ SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
+ Get("/show_product")
+
+
+// If necessary, you can force response content type to tell Resty to parse a JSON response into your struct
+resp, err := client.R().
+ SetResult(result).
+ ForceContentType("application/json").
+ Get("v2/alpine/manifests/latest")
+```
+
+#### Various POST method combinations
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// POST JSON string
+// No need to set content type, if you have client level setting
+resp, err := client.R().
+ SetHeader("Content-Type", "application/json").
+ SetBody(`{"username":"testuser", "password":"testpass"}`).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ Post("https://myapp.com/login")
+
+// POST []byte array
+// No need to set content type, if you have client level setting
+resp, err := client.R().
+ SetHeader("Content-Type", "application/json").
+ SetBody([]byte(`{"username":"testuser", "password":"testpass"}`)).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ Post("https://myapp.com/login")
+
+// POST Struct, default is JSON content type. No need to set one
+resp, err := client.R().
+ SetBody(User{Username: "testuser", Password: "testpass"}).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ SetError(&AuthError{}). // or SetError(AuthError{}).
+ Post("https://myapp.com/login")
+
+// POST Map, default is JSON content type. No need to set one
+resp, err := client.R().
+ SetBody(map[string]interface{}{"username": "testuser", "password": "testpass"}).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ SetError(&AuthError{}). // or SetError(AuthError{}).
+ Post("https://myapp.com/login")
+
+// POST of raw bytes for file upload. For example: upload file to Dropbox
+fileBytes, _ := os.ReadFile("/Users/jeeva/mydocument.pdf")
+
+// Note that we are not setting the content-type header, since go-resty automatically detects Content-Type for you
+resp, err := client.R().
+ SetBody(fileBytes).
+ SetContentLength(true). // Dropbox expects this value
+ SetAuthToken("").
+ SetError(&DropboxError{}). // or SetError(DropboxError{}).
+ Post("https://content.dropboxapi.com/1/files_put/auto/resty/mydocument.pdf") // for upload Dropbox supports PUT too
+
+// Note: resty detects Content-Type for request body/payload if content type header is not set.
+// * For struct and map data types it defaults to 'application/json'
+// * Fallback is plain text content type
+```
+
+#### Sample PUT
+
+You can use various combinations of the `PUT` method call, as demonstrated for `POST`.
+
+```go
+// Note: This is one sample of PUT method usage, refer to POST for more combinations
+
+// Create a Resty Client
+client := resty.New()
+
+// Request goes as JSON content type
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetBody(Article{
+ Title: "go-resty",
+ Content: "This is my article content, oh ya!",
+ Author: "Jeevanandam M",
+ Tags: []string{"article", "sample", "resty"},
+ }).
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Put("https://myapp.com/article/1234")
+```
+
+#### Sample PATCH
+
+You can use various combinations of the `PATCH` method call, as demonstrated for `POST`.
+
+```go
+// Note: This is one sample of PATCH method usage, refer to POST for more combinations
+
+// Create a Resty Client
+client := resty.New()
+
+// Request goes as JSON content type
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetBody(Article{
+ Tags: []string{"new tag1", "new tag2"},
+ }).
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Patch("https://myapp.com/articles/1234")
+```
+
+#### Sample DELETE, HEAD, OPTIONS
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// DELETE an article
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Delete("https://myapp.com/articles/1234")
+
+// DELETE articles with a payload/body as a JSON string
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ SetHeader("Content-Type", "application/json").
+  SetBody(`{"article_ids": [1002, 1006, 1007, 87683, 45432]}`).
+ Delete("https://myapp.com/articles")
+
+// HEAD of resource
+// No need to set auth token, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ Head("https://myapp.com/videos/hi-res-video")
+
+// OPTIONS of resource
+// No need to set auth token, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ Options("https://myapp.com/servers/nyc-dc-01")
+```
+
+#### Override JSON & XML Marshal/Unmarshal
+
+You can register your choice of JSON/XML library with Resty or write your own. By default, Resty registers the standard `encoding/json` and `encoding/xml` packages respectively.
+
+```go
+// Example of registering json-iterator
+import jsoniter "github.com/json-iterator/go"
+
+json := jsoniter.ConfigCompatibleWithStandardLibrary
+
+client := resty.New().
+ SetJSONMarshaler(json.Marshal).
+ SetJSONUnmarshaler(json.Unmarshal)
+
+// similarly user could do for XML too with -
+client.SetXMLMarshaler(xml.Marshal).
+ SetXMLUnmarshaler(xml.Unmarshal)
+```
+
+### Multipart File(s) upload
+
+#### Using io.Reader
+
+```go
+profileImgBytes, _ := os.ReadFile("/Users/jeeva/test-img.png")
+notesBytes, _ := os.ReadFile("/Users/jeeva/text-file.txt")
+
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ SetFileReader("profile_img", "test-img.png", bytes.NewReader(profileImgBytes)).
+ SetFileReader("notes", "text-file.txt", bytes.NewReader(notesBytes)).
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ }).
+ Post("http://myapp.com/upload")
+```
+
+#### Using File directly from Path
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Single file scenario
+resp, err := client.R().
+ SetFile("profile_img", "/Users/jeeva/test-img.png").
+ Post("http://myapp.com/upload")
+
+// Multiple files scenario
+resp, err := client.R().
+ SetFiles(map[string]string{
+ "profile_img": "/Users/jeeva/test-img.png",
+ "notes": "/Users/jeeva/text-file.txt",
+ }).
+ Post("http://myapp.com/upload")
+
+// Multipart of form fields and files
+resp, err := client.R().
+ SetFiles(map[string]string{
+ "profile_img": "/Users/jeeva/test-img.png",
+ "notes": "/Users/jeeva/text-file.txt",
+ }).
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ "zip_code": "00001",
+ "city": "my city",
+ "access_token": "C6A79608-782F-4ED0-A11D-BD82FAD829CD",
+ }).
+ Post("http://myapp.com/profile")
+```
+
+#### Sample Form submission
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// just mentioning about POST as an example with simple flow
+// User Login
+resp, err := client.R().
+ SetFormData(map[string]string{
+ "username": "jeeva",
+ "password": "mypass",
+ }).
+ Post("http://myapp.com/login")
+
+// Followed by profile update
+resp, err := client.R().
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ "zip_code": "00001",
+ "city": "new city update",
+ }).
+ Post("http://myapp.com/profile")
+
+// Multi value form data
+criteria := url.Values{
+ "search_criteria": []string{"book", "glass", "pencil"},
+}
+resp, err := client.R().
+ SetFormDataFromValues(criteria).
+ Post("http://myapp.com/search")
+```
+
+#### Save HTTP Response into File
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Set the output directory path. If the directory does not exist, resty creates it!
+// This setting is optional; if you plan to use an absolute path in
+// `Request.SetOutput`, the two can be used together.
+client.SetOutputDirectory("/Users/jeeva/Downloads")
+
+// HTTP response gets saved into file, similar to curl -o flag
+_, err := client.R().
+ SetOutput("plugin/ReplyWithHeader-v5.1-beta.zip").
+ Get("http://bit.ly/1LouEKr")
+
+// OR using absolute path
+// Note: output directory path is not used for absolute path
+_, err := client.R().
+ SetOutput("/MyDownloads/plugin/ReplyWithHeader-v5.1-beta.zip").
+ Get("http://bit.ly/1LouEKr")
+```
+
+#### Request URL Path Params
+
+Resty provides easy-to-use dynamic request URL path params. Params can be set at the client and request level. Client-level param values can be overridden at the request level.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.R().SetPathParams(map[string]string{
+ "userId": "sample@sample.com",
+ "subAccountId": "100002",
+}).
+Get("/v1/users/{userId}/{subAccountId}/details")
+
+// Result:
+// Composed URL - /v1/users/sample@sample.com/100002/details
+```
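+
+For instance, here is a minimal sketch of a client-level path param being overridden at the request level (the param names and URL are illustrative):
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Client level path param, applied to every request from this client
+client.SetPathParam("userId", "sample@sample.com")
+
+// Request level value overrides the client level one for this request only
+client.R().
+  SetPathParam("userId", "override@sample.com").
+  Get("/v1/users/{userId}/details")
+
+// Result:
+//   Composed URL - /v1/users/override@sample.com/details
+```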
+
+#### Request and Response Middleware
+
+Resty provides middleware to manipulate the Request and the Response. It is more flexible than a callback approach.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Registering Request Middleware
+client.OnBeforeRequest(func(c *resty.Client, req *resty.Request) error {
+ // Now you have access to Client and current Request object
+ // manipulate it as per your need
+
+ return nil // if its success otherwise return error
+ })
+
+// Registering Response Middleware
+client.OnAfterResponse(func(c *resty.Client, resp *resty.Response) error {
+ // Now you have access to Client and current Response object
+ // manipulate it as per your need
+
+ return nil // if its success otherwise return error
+ })
+```
+
+#### OnError Hooks
+
+Resty provides OnError hooks that may be called because:
+
+- The client failed to send the request due to connection timeout, TLS handshake failure, etc...
+- The request was retried the maximum number of times, and still failed.
+
+If there was a response from the server, the original error will be wrapped in `*resty.ResponseError` which contains the last response received.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.OnError(func(req *resty.Request, err error) {
+ if v, ok := err.(*resty.ResponseError); ok {
+ // v.Response contains the last response from the server
+ // v.Err contains the original error
+ }
+ // Log the error, increment a metric, etc...
+})
+```
+
+#### Redirect Policy
+
+Resty provides a few ready-to-use redirect policies, and it also supports combining multiple policies.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Assign Client Redirect Policy. Create one as per your need
+client.SetRedirectPolicy(resty.FlexibleRedirectPolicy(15))
+
+// Want multiple policies such as redirect count, domain name check, etc.?
+client.SetRedirectPolicy(resty.FlexibleRedirectPolicy(20),
+ resty.DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+```
+
+##### Custom Redirect Policy
+
+Implement the [RedirectPolicy](redirect.go#L20) interface and register it with the resty client. Have a look at [redirect.go](redirect.go) for more information.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Using raw func into resty.SetRedirectPolicy
+client.SetRedirectPolicy(resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ // Implement your logic here
+
+  // return nil to continue the redirect, otherwise return an error to stop/prevent the redirect
+ return nil
+}))
+
+//---------------------------------------------------
+
+// Using a struct to create a more flexible redirect policy
+type CustomRedirectPolicy struct {
+  // variables go here
+}
+
+func (c *CustomRedirectPolicy) Apply(req *http.Request, via []*http.Request) error {
+ // Implement your logic here
+
+  // return nil to continue the redirect, otherwise return an error to stop/prevent the redirect
+ return nil
+}
+
+// Registering in resty
+client.SetRedirectPolicy(&CustomRedirectPolicy{/* initialize variables */})
+```
+
+#### Custom Root Certificates and Client Certificates
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Custom root certificates: just supply the .pem file.
+// You can add one or more root certificates; they get appended
+client.SetRootCertificate("/path/to/root/pemFile1.pem")
+client.SetRootCertificate("/path/to/root/pemFile2.pem")
+// ... and so on!
+
+// Adding Client Certificates, you add one or more certificates
+// Sample for creating certificate object
+// Parsing public/private key pair from a pair of files. The files must contain PEM encoded data.
+cert1, err := tls.LoadX509KeyPair("certs/client.pem", "certs/client.key")
+if err != nil {
+ log.Fatalf("ERROR client certificate: %s", err)
+}
+// ...
+
+// You add one or more certificates
+client.SetCertificates(cert1, cert2, cert3)
+```
+
+#### Custom Root Certificates and Client Certificates from string
+
+```go
+// Custom root certificates from strings
+// You can pass your certificates through env variables as strings
+// You can add one or more root certificates; they get appended
+client.SetRootCertificateFromString("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----")
+client.SetRootCertificateFromString("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----")
+// ... and so on!
+
+// Adding Client Certificates, you add one or more certificates
+// Sample for creating certificate object
+// Parsing a public/private key pair from strings. The strings must contain PEM encoded data.
+cert1, err := tls.X509KeyPair([]byte("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----"), []byte("-----BEGIN PRIVATE KEY-----content-----END PRIVATE KEY-----"))
+if err != nil {
+ log.Fatalf("ERROR client certificate: %s", err)
+}
+// ...
+
+// You add one or more certificates
+client.SetCertificates(cert1, cert2, cert3)
+```
+
+#### Proxy Settings
+
+By default, `Go` supports proxies via the environment variable `HTTP_PROXY`. Resty provides support via `SetProxy` & `RemoveProxy`.
+Choose as per your need.
+
+**Client Level Proxy** settings are applied to all requests
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Setting a Proxy URL and Port
+client.SetProxy("http://proxyserver:8888")
+
+// Want to remove proxy setting
+client.RemoveProxy()
+```
+
+#### Retries
+
+Resty uses [backoff](http://www.awsarchitectureblog.com/2015/03/backoff.html)
+to increase retry intervals after each attempt.
+
+Usage example:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Retries are configured per client
+client.
+  // Set retry count to non-zero to enable retries
+ SetRetryCount(3).
+ // You can override initial retry wait time.
+ // Default is 100 milliseconds.
+ SetRetryWaitTime(5 * time.Second).
+ // MaxWaitTime can be overridden as well.
+ // Default is 2 seconds.
+ SetRetryMaxWaitTime(20 * time.Second).
+ // SetRetryAfter sets callback to calculate wait time between retries.
+ // Default (nil) implies exponential backoff with jitter
+ SetRetryAfter(func(client *resty.Client, resp *resty.Response) (time.Duration, error) {
+ return 0, errors.New("quota exceeded")
+ })
+```
+
+By default, resty will retry requests that return a non-nil error during execution.
+Therefore, the above setup will result in resty retrying requests with non-nil errors up to 3 times,
+with the delay increasing after each attempt.
+
+You can optionally provide the client with [custom retry conditions](https://pkg.go.dev/github.com/go-resty/resty/v2#RetryConditionFunc):
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.AddRetryCondition(
+ // RetryConditionFunc type is for retry condition function
+ // input: non-nil Response OR request execution error
+ func(r *resty.Response, err error) bool {
+ return r.StatusCode() == http.StatusTooManyRequests
+ },
+)
+```
+
+The above example will make resty retry requests that end with a `429 Too Many Requests` status code.
+It's important to note that when you specify conditions using `AddRetryCondition`,
+it will override the default retry behavior, which retries on errors encountered during the request.
+If you want to retry on errors encountered during the request, similar to the default behavior,
+you'll need to configure it as follows:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.AddRetryCondition(
+ func(r *resty.Response, err error) bool {
+ // Including "err != nil" emulates the default retry behavior for errors encountered during the request.
+ return err != nil || r.StatusCode() == http.StatusTooManyRequests
+ },
+)
+```
+
+Multiple retry conditions can be added.
+Note that if multiple conditions are specified, a retry will occur if any of the conditions are met.
+
+It is also possible to use `resty.Backoff(...)` to get arbitrary retry scenarios
+implemented. [Reference](retry_test.go).
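+
+For example, here is a minimal sketch of registering more than one condition (the status codes chosen are illustrative); a retry is triggered when either condition returns true:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.
+  SetRetryCount(3).
+  // Retry on request execution errors (timeouts, connection resets, etc.)
+  AddRetryCondition(func(r *resty.Response, err error) bool {
+    return err != nil
+  }).
+  // Also retry on throttling and temporary server-side failures
+  AddRetryCondition(func(r *resty.Response, err error) bool {
+    return r.StatusCode() == http.StatusTooManyRequests ||
+      r.StatusCode() == http.StatusServiceUnavailable
+  })
+```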
+
+#### Allow GET request with Payload
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Allow GET request with Payload. This is disabled by default.
+client.SetAllowGetMethodPayload(true)
+```
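+
+Once enabled, a request body can be attached to a `GET` call in the usual way; a minimal sketch (the endpoint and query document are illustrative):
+
+```go
+resp, err := client.R().
+  SetHeader("Content-Type", "application/json").
+  SetBody(`{"query": {"match_all": {}}}`).
+  Get("http://myapp.com/search")
+```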
+
+#### Wanna Multiple Clients
+
+```go
+// Here you go!
+// Client 1
+client1 := resty.New()
+client1.R().Get("http://httpbin.org")
+// ...
+
+// Client 2
+client2 := resty.New()
+client2.R().Head("http://httpbin.org")
+// ...
+
+// Bend it as per your need!!!
+```
+
+#### Remaining Client Settings & its Options
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Unique settings at Client level
+//--------------------------------
+// Enable debug mode
+client.SetDebug(true)
+
+// Assign Client TLSClientConfig
+// One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+client.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+
+// or One can disable security check (https)
+client.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+
+// Set client timeout as per your need
+client.SetTimeout(1 * time.Minute)
+
+
+// You can override all below settings and options at request level if you want to
+//--------------------------------------------------------------------------------
+// Host URL for all requests. So you can use a relative URL in the request
+client.SetHostURL("http://httpbin.org")
+
+// Headers for all requests
+client.SetHeader("Accept", "application/json")
+client.SetHeaders(map[string]string{
+ "Content-Type": "application/json",
+ "User-Agent": "My custom User Agent String",
+ })
+
+// Cookies for all requests
+client.SetCookie(&http.Cookie{
+ Name:"go-resty",
+ Value:"This is cookie value",
+ Path: "/",
+ Domain: "sample.com",
+ MaxAge: 36000,
+ HttpOnly: true,
+ Secure: false,
+ })
+client.SetCookies(cookies)
+
+// URL query parameters for all requests
+client.SetQueryParam("user_id", "00001")
+client.SetQueryParams(map[string]string{ // sample of using a map
+ "api_key": "api-key-here",
+ "api_secret": "api-secret",
+ })
+client.R().SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
+
+// Form data for all requests. Typically used with POST and PUT
+client.SetFormData(map[string]string{
+ "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+ })
+
+// Basic Auth for all requests
+client.SetBasicAuth("myuser", "mypass")
+
+// Bearer Auth Token for all requests
+client.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+
+// Enabling Content length value for all requests
+client.SetContentLength(true)
+
+// Registering global Error object structure for JSON/XML request
+client.SetError(&Error{}) // or client.SetError(Error{})
+```
+
+#### Unix Socket
+
+```go
+unixSocket := "/var/run/my_socket.sock"
+
+// Create a Go's http.Transport so we can set it in resty.
+transport := http.Transport{
+ Dial: func(_, _ string) (net.Conn, error) {
+ return net.Dial("unix", unixSocket)
+ },
+}
+
+// Create a Resty Client
+client := resty.New()
+
+// Set the previous transport that we created, set the scheme of the communication to the
+// socket and set the unixSocket as the HostURL.
+client.SetTransport(&transport).SetScheme("http").SetHostURL(unixSocket)
+
+// No need to write the host's URL on the request, just the path.
+client.R().Get("http://localhost/index.html")
+```
+
+#### Bazel Support
+
+Resty can be built, tested and depended upon via [Bazel](https://bazel.build).
+For example, to run all tests:
+
+```shell
+bazel test :resty_test
+```
+
+#### Mocking http requests using [httpmock](https://github.com/jarcoal/httpmock) library
+
+To mock http requests when testing your application, you
+can use the `httpmock` library.
+
+When using the default resty client, you should pass the client to the library as follows:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Get the underlying HTTP Client and set it to Mock
+httpmock.ActivateNonDefault(client.GetClient())
+```
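+
+From there, responders can be registered for the URLs your code calls. Here is a minimal sketch, assuming the usual `httpmock` responder helpers and an illustrative URL:
+
+```go
+// Register a canned response for a specific endpoint
+httpmock.RegisterResponder("GET", "https://api.example.com/users/1",
+  httpmock.NewStringResponder(200, `{"id": 1, "name": "jeeva"}`))
+
+// Requests made through the resty client now hit the mock
+resp, _ := client.R().Get("https://api.example.com/users/1")
+fmt.Println(resp.String())
+```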
+
+A more detailed example of mocking resty http requests using ginkgo can be found [here](https://github.com/jarcoal/httpmock#ginkgo--resty-example).
+
+## Versioning
+
+Resty releases versions according to [Semantic Versioning](http://semver.org).
+
+ * Resty v2 does not use `gopkg.in` service for library versioning.
+ * Resty has been fully adapted to `go mod` capabilities since the `v1.10.0` release.
+ * The Resty v1 series used `gopkg.in` to provide versioning. `gopkg.in/resty.vX` points to appropriate tagged versions; `X` denotes the version series number and it's a stable release for production use, e.g. `gopkg.in/resty.v0`.
+ * Development takes place on the master branch. Although the code in master should always compile and test successfully, it might break APIs. I aim to maintain backwards compatibility, but sometimes APIs and behavior might be changed to fix a bug.
+
+## Contribution
+
+I would welcome your contribution! If you find any improvement or issue you want to fix, feel free to send a pull request. I like pull requests that include test cases for fixes/enhancements. I have done my best to bring pretty good code coverage. Feel free to write tests.
+
+BTW, I'd like to know what you think about `Resty`. Kindly open an issue or send me an email; it'd mean a lot to me.
+
+## Creator
+
+[Jeevanandam M.](https://github.com/jeevatkm) (jeeva@myjeeva.com)
+
+## Core Team
+
+Have a look at the [Members](https://github.com/orgs/go-resty/people) page.
+
+## Contributors
+
+Have a look at the [Contributors](https://github.com/go-resty/resty/graphs/contributors) page.
+
+## License
+
+Resty is released under the MIT license; refer to the [LICENSE](LICENSE) file.
diff --git a/vendor/github.com/go-resty/resty/v2/WORKSPACE b/vendor/github.com/go-resty/resty/v2/WORKSPACE
new file mode 100644
index 0000000..9ef03e9
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/WORKSPACE
@@ -0,0 +1,31 @@
+workspace(name = "resty")
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+ name = "io_bazel_rules_go",
+ sha256 = "69de5c704a05ff37862f7e0f5534d4f479418afc21806c887db544a316f3cb6b",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz",
+ "https://github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "bazel_gazelle",
+ sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
+ ],
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
+
+go_rules_dependencies()
+
+go_register_toolchains(version = "1.16")
+
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
+
+gazelle_dependencies()
diff --git a/vendor/github.com/go-resty/resty/v2/client.go b/vendor/github.com/go-resty/resty/v2/client.go
new file mode 100644
index 0000000..446ba85
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/client.go
@@ -0,0 +1,1391 @@
+// Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net/http"
+ "net/url"
+ "os"
+ "reflect"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ // MethodGet HTTP method
+ MethodGet = "GET"
+
+ // MethodPost HTTP method
+ MethodPost = "POST"
+
+ // MethodPut HTTP method
+ MethodPut = "PUT"
+
+ // MethodDelete HTTP method
+ MethodDelete = "DELETE"
+
+ // MethodPatch HTTP method
+ MethodPatch = "PATCH"
+
+ // MethodHead HTTP method
+ MethodHead = "HEAD"
+
+ // MethodOptions HTTP method
+ MethodOptions = "OPTIONS"
+)
+
+var (
+ hdrUserAgentKey = http.CanonicalHeaderKey("User-Agent")
+ hdrAcceptKey = http.CanonicalHeaderKey("Accept")
+ hdrContentTypeKey = http.CanonicalHeaderKey("Content-Type")
+ hdrContentLengthKey = http.CanonicalHeaderKey("Content-Length")
+ hdrContentEncodingKey = http.CanonicalHeaderKey("Content-Encoding")
+ hdrLocationKey = http.CanonicalHeaderKey("Location")
+ hdrAuthorizationKey = http.CanonicalHeaderKey("Authorization")
+ hdrWwwAuthenticateKey = http.CanonicalHeaderKey("WWW-Authenticate")
+
+ plainTextType = "text/plain; charset=utf-8"
+ jsonContentType = "application/json"
+ formContentType = "application/x-www-form-urlencoded"
+
+ jsonCheck = regexp.MustCompile(`(?i:(application|text)/(.*json.*)(;|$))`)
+ xmlCheck = regexp.MustCompile(`(?i:(application|text)/(.*xml.*)(;|$))`)
+
+ hdrUserAgentValue = "go-resty/" + Version + " (https://github.com/go-resty/resty)"
+ bufPool = &sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}
+)
+
+type (
+ // RequestMiddleware type is for request middleware, called before a request is sent
+ RequestMiddleware func(*Client, *Request) error
+
+ // ResponseMiddleware type is for response middleware, called after a response has been received
+ ResponseMiddleware func(*Client, *Response) error
+
+ // PreRequestHook type is for the request hook, called right before the request is sent
+ PreRequestHook func(*Client, *http.Request) error
+
+ // RequestLogCallback type is for request logs, called before the request is logged
+ RequestLogCallback func(*RequestLog) error
+
+ // ResponseLogCallback type is for response logs, called before the response is logged
+ ResponseLogCallback func(*ResponseLog) error
+
+ // ErrorHook type is for reacting to request errors, called after all retries were attempted
+ ErrorHook func(*Request, error)
+
+ // SuccessHook type is for reacting to request success
+ SuccessHook func(*Client, *Response)
+)
+
+// Client struct is used to create a Resty client with client level settings;
+// these settings are applicable to all the requests raised from the client.
+//
+// Resty also provides options to override most of the client settings
+// at the request level.
+type Client struct {
+ BaseURL string
+ HostURL string // Deprecated: use BaseURL instead. To be removed in v3.0.0 release.
+ QueryParam url.Values
+ FormData url.Values
+ PathParams map[string]string
+ RawPathParams map[string]string
+ Header http.Header
+ UserInfo *User
+ Token string
+ AuthScheme string
+ Cookies []*http.Cookie
+ Error reflect.Type
+ Debug bool
+ DisableWarn bool
+ AllowGetMethodPayload bool
+ RetryCount int
+ RetryWaitTime time.Duration
+ RetryMaxWaitTime time.Duration
+ RetryConditions []RetryConditionFunc
+ RetryHooks []OnRetryFunc
+ RetryAfter RetryAfterFunc
+ RetryResetReaders bool
+ JSONMarshal func(v interface{}) ([]byte, error)
+ JSONUnmarshal func(data []byte, v interface{}) error
+ XMLMarshal func(v interface{}) ([]byte, error)
+ XMLUnmarshal func(data []byte, v interface{}) error
+
+ // HeaderAuthorizationKey is used to set/access Request Authorization header
+ // value when `SetAuthToken` option is used.
+ HeaderAuthorizationKey string
+
+ jsonEscapeHTML bool
+ setContentLength bool
+ closeConnection bool
+ notParseResponse bool
+ trace bool
+ debugBodySizeLimit int64
+ outputDirectory string
+ scheme string
+ log Logger
+ httpClient *http.Client
+ proxyURL *url.URL
+ beforeRequest []RequestMiddleware
+ udBeforeRequest []RequestMiddleware
+ udBeforeRequestLock sync.RWMutex
+ preReqHook PreRequestHook
+ successHooks []SuccessHook
+ afterResponse []ResponseMiddleware
+ afterResponseLock sync.RWMutex
+ requestLog RequestLogCallback
+ responseLog ResponseLogCallback
+ errorHooks []ErrorHook
+ invalidHooks []ErrorHook
+ panicHooks []ErrorHook
+ rateLimiter RateLimiter
+}
+
+// User type is to hold a username and password information
+type User struct {
+ Username, Password string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Client methods
+//___________________________________
+
+// SetHostURL method is to set Host URL in the client instance. It will be used with request
+// raised from this client with relative URL
+//
+// // Setting HTTP address
+// client.SetHostURL("http://myjeeva.com")
+//
+// // Setting HTTPS address
+// client.SetHostURL("https://myjeeva.com")
+//
+// Deprecated: use SetBaseURL instead. To be removed in v3.0.0 release.
+func (c *Client) SetHostURL(url string) *Client {
+ c.SetBaseURL(url)
+ return c
+}
+
+// SetBaseURL method is to set Base URL in the client instance. It will be used with request
+// raised from this client with relative URL
+//
+// // Setting HTTP address
+// client.SetBaseURL("http://myjeeva.com")
+//
+// // Setting HTTPS address
+// client.SetBaseURL("https://myjeeva.com")
+//
+// Since v2.7.0
+func (c *Client) SetBaseURL(url string) *Client {
+ c.BaseURL = strings.TrimRight(url, "/")
+ c.HostURL = c.BaseURL
+ return c
+}
+
+// SetHeader method sets a single header field and its value in the client instance.
+// These headers will be applied to all requests raised from this client instance.
+// Also it can be overridden at request level header options.
+//
+// See `Request.SetHeader` or `Request.SetHeaders`.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.
+// SetHeader("Content-Type", "application/json").
+// SetHeader("Accept", "application/json")
+func (c *Client) SetHeader(header, value string) *Client {
+ c.Header.Set(header, value)
+ return c
+}
+
+// SetHeaders method sets multiple headers field and its values at one go in the client instance.
+// These headers will be applied to all requests raised from this client instance. Also it can be
+// overridden at request level headers options.
+//
+// See `Request.SetHeaders` or `Request.SetHeader`.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.SetHeaders(map[string]string{
+// "Content-Type": "application/json",
+// "Accept": "application/json",
+// })
+func (c *Client) SetHeaders(headers map[string]string) *Client {
+ for h, v := range headers {
+ c.Header.Set(h, v)
+ }
+ return c
+}
+
+// SetHeaderVerbatim method is to set a single header field and its value verbatim in the current request.
+//
+// For Example: To set `all_lowercase` and `UPPERCASE` as `available`.
+//
+// client.R().
+// SetHeaderVerbatim("all_lowercase", "available").
+// SetHeaderVerbatim("UPPERCASE", "available")
+//
+// Also you can override header value, which was set at client instance level.
+//
+// Since v2.6.0
+func (c *Client) SetHeaderVerbatim(header, value string) *Client {
+ c.Header[header] = []string{value}
+ return c
+}
+
+// SetCookieJar method sets a custom http.CookieJar in the resty client. It's a way to override the default.
+//
+// For Example: sometimes we don't want to save cookies while contacting an API, so we can remove the default
+// CookieJar in the resty client.
+//
+// client.SetCookieJar(nil)
+func (c *Client) SetCookieJar(jar http.CookieJar) *Client {
+ c.httpClient.Jar = jar
+ return c
+}
+
+// SetCookie method appends a single cookie in the client instance.
+// These cookies will be added to all the request raised from this client instance.
+//
+// client.SetCookie(&http.Cookie{
+// Name:"go-resty",
+// Value:"This is cookie value",
+// })
+func (c *Client) SetCookie(hc *http.Cookie) *Client {
+ c.Cookies = append(c.Cookies, hc)
+ return c
+}
+
+// SetCookies method sets an array of cookies in the client instance.
+// These cookies will be added to all the request raised from this client instance.
+//
+// cookies := []*http.Cookie{
+// &http.Cookie{
+// Name:"go-resty-1",
+// Value:"This is cookie 1 value",
+// },
+// &http.Cookie{
+// Name:"go-resty-2",
+// Value:"This is cookie 2 value",
+// },
+// }
+//
+// // Setting a cookies into resty
+// client.SetCookies(cookies)
+func (c *Client) SetCookies(cs []*http.Cookie) *Client {
+ c.Cookies = append(c.Cookies, cs...)
+ return c
+}
+
+// SetQueryParam method sets single parameter and its value in the client instance.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large`
+// in the URL after `?` mark. These query params will be added to all the request raised from
+// this client instance. Also it can be overridden at request level Query Param options.
+//
+// See `Request.SetQueryParam` or `Request.SetQueryParams`.
+//
+// client.
+// SetQueryParam("search", "kitchen papers").
+// SetQueryParam("size", "large")
+func (c *Client) SetQueryParam(param, value string) *Client {
+ c.QueryParam.Set(param, value)
+ return c
+}
+
+// SetQueryParams method sets multiple parameters and their values at one go in the client instance.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large`
+// in the URL after `?` mark. These query params will be added to all the request raised from this
+// client instance. Also it can be overridden at request level Query Param options.
+//
+// See `Request.SetQueryParams` or `Request.SetQueryParam`.
+//
+// client.SetQueryParams(map[string]string{
+// "search": "kitchen papers",
+// "size": "large",
+// })
+func (c *Client) SetQueryParams(params map[string]string) *Client {
+ for p, v := range params {
+ c.SetQueryParam(p, v)
+ }
+ return c
+}
+
+// SetFormData method sets Form parameters and their values in the client instance.
+// It's applicable only HTTP method `POST` and `PUT` and request content type would be set as
+// `application/x-www-form-urlencoded`. These form data will be added to all the request raised from
+// this client instance. Also it can be overridden at request level form data.
+//
+// See `Request.SetFormData`.
+//
+// client.SetFormData(map[string]string{
+// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+// "user_id": "3455454545",
+// })
+func (c *Client) SetFormData(data map[string]string) *Client {
+ for k, v := range data {
+ c.FormData.Set(k, v)
+ }
+ return c
+}
+
+// SetBasicAuth method sets the basic authentication header in the HTTP request. For Example:
+//
+// Authorization: Basic
+//
+// For Example: To set the header for username "go-resty" and password "welcome"
+//
+// client.SetBasicAuth("go-resty", "welcome")
+//
+// This basic auth information gets added to all the request raised from this client instance.
+// It can also be overridden or set at the request level.
+//
+// See `Request.SetBasicAuth`.
+func (c *Client) SetBasicAuth(username, password string) *Client {
+ c.UserInfo = &User{Username: username, Password: password}
+ return c
+}
+
+// SetAuthToken method sets the auth token of the `Authorization` header for all HTTP requests.
+// The default auth scheme is `Bearer`, it can be customized with the method `SetAuthScheme`. For Example:
+//
+// Authorization:
+//
+// For Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
+//
+// client.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+//
+// This auth token gets added to all the requests raised from this client instance.
+// It can also be overridden or set at the request level.
+//
+// See `Request.SetAuthToken`.
+func (c *Client) SetAuthToken(token string) *Client {
+ c.Token = token
+ return c
+}
+
+// SetAuthScheme method sets the auth scheme type in the HTTP request. For Example:
+//
+// Authorization:
+//
+// For Example: To set the scheme to use OAuth
+//
+// client.SetAuthScheme("OAuth")
+//
+// This auth scheme gets added to all the requests raised from this client instance.
+// It can also be overridden or set at the request level.
+//
+// Information about auth schemes can be found in RFC7235 which is linked to below
+// along with the page containing the currently defined official authentication schemes:
+//
+// https://tools.ietf.org/html/rfc7235
+// https://www.iana.org/assignments/http-authschemes/http-authschemes.xhtml#authschemes
+//
+// See `Request.SetAuthToken`.
+func (c *Client) SetAuthScheme(scheme string) *Client {
+ c.AuthScheme = scheme
+ return c
+}
+
+// SetDigestAuth method sets the Digest Access auth scheme for the client. If a server responds with 401 and sends
+// a Digest challenge in the WWW-Authenticate Header, requests will be resent with the appropriate Authorization Header.
+//
+// For Example: To set the Digest scheme with user "Mufasa" and password "Circle Of Life"
+//
+// client.SetDigestAuth("Mufasa", "Circle Of Life")
+//
+// Information about Digest Access Authentication can be found in RFC7616:
+//
+// https://datatracker.ietf.org/doc/html/rfc7616
+//
+// See `Request.SetDigestAuth`.
+func (c *Client) SetDigestAuth(username, password string) *Client {
+ oldTransport := c.httpClient.Transport
+ c.OnBeforeRequest(func(c *Client, _ *Request) error {
+ c.httpClient.Transport = &digestTransport{
+ digestCredentials: digestCredentials{username, password},
+ transport: oldTransport,
+ }
+ return nil
+ })
+ c.OnAfterResponse(func(c *Client, _ *Response) error {
+ c.httpClient.Transport = oldTransport
+ return nil
+ })
+ return c
+}
+
+// R method creates a new request instance; it's used for Get, Post, Put, Delete, Patch, Head, Options, etc.
+func (c *Client) R() *Request {
+ r := &Request{
+ QueryParam: url.Values{},
+ FormData: url.Values{},
+ Header: http.Header{},
+ Cookies: make([]*http.Cookie, 0),
+ PathParams: map[string]string{},
+ RawPathParams: map[string]string{},
+ Debug: c.Debug,
+
+ client: c,
+ multipartFiles: []*File{},
+ multipartFields: []*MultipartField{},
+ jsonEscapeHTML: c.jsonEscapeHTML,
+ log: c.log,
+ }
+ return r
+}
+
+// NewRequest is an alias for method `R()`. Creates a new request instance; it's used for
+// Get, Post, Put, Delete, Patch, Head, Options, etc.
+func (c *Client) NewRequest() *Request {
+ return c.R()
+}
+
+// OnBeforeRequest method appends a request middleware into the before request chain.
+// The user defined middlewares get applied before the default Resty request middlewares.
+// After all middlewares have been applied, the request is sent from Resty to the host server.
+//
+// client.OnBeforeRequest(func(c *resty.Client, r *resty.Request) error {
+// // Now you have access to Client and Request instance
+// // manipulate it as per your need
+//
+// return nil // if its success otherwise return error
+// })
+func (c *Client) OnBeforeRequest(m RequestMiddleware) *Client {
+ c.udBeforeRequestLock.Lock()
+ defer c.udBeforeRequestLock.Unlock()
+
+ c.udBeforeRequest = append(c.udBeforeRequest, m)
+
+ return c
+}
+
+// OnAfterResponse method appends response middleware into the after response chain.
+// Once we receive response from host server, default Resty response middleware
+// gets applied and then user assigned response middlewares applied.
+//
+// client.OnAfterResponse(func(c *resty.Client, r *resty.Response) error {
+// // Now you have access to Client and Response instance
+// // manipulate it as per your need
+//
+// return nil // if its success otherwise return error
+// })
+func (c *Client) OnAfterResponse(m ResponseMiddleware) *Client {
+ c.afterResponseLock.Lock()
+ defer c.afterResponseLock.Unlock()
+
+ c.afterResponse = append(c.afterResponse, m)
+
+ return c
+}
+
+// OnError method adds a callback that will be run whenever a request execution fails.
+// This is called after all retries have been attempted (if any).
+// If there was a response from the server, the error will be wrapped in *ResponseError
+// which has the last response received from the server.
+//
+// client.OnError(func(req *resty.Request, err error) {
+// if v, ok := err.(*resty.ResponseError); ok {
+// // Do something with v.Response
+// }
+// // Log the error, increment a metric, etc...
+// })
+//
+// Out of the OnSuccess, OnError, OnInvalid, OnPanic callbacks, exactly one
+// set will be invoked for each call to Request.Execute() that completes.
+func (c *Client) OnError(h ErrorHook) *Client {
+ c.errorHooks = append(c.errorHooks, h)
+ return c
+}
+
+// OnSuccess method adds a callback that will be run whenever a request execution
+// succeeds. This is called after all retries have been attempted (if any).
+//
+// Out of the OnSuccess, OnError, OnInvalid, OnPanic callbacks, exactly one
+// set will be invoked for each call to Request.Execute() that completes.
+//
+// Since v2.8.0
+func (c *Client) OnSuccess(h SuccessHook) *Client {
+ c.successHooks = append(c.successHooks, h)
+ return c
+}
+
+// OnInvalid method adds a callback that will be run whenever a request execution
+// fails before it starts because the request is invalid.
+//
+// Out of the OnSuccess, OnError, OnInvalid, OnPanic callbacks, exactly one
+// set will be invoked for each call to Request.Execute() that completes.
+//
+// Since v2.8.0
+func (c *Client) OnInvalid(h ErrorHook) *Client {
+ c.invalidHooks = append(c.invalidHooks, h)
+ return c
+}
+
+// OnPanic method adds a callback that will be run whenever a request execution
+// panics.
+//
+// Out of the OnSuccess, OnError, OnInvalid, OnPanic callbacks, exactly one
+// set will be invoked for each call to Request.Execute() that completes.
+// If an OnSuccess, OnError, or OnInvalid callback panics, then the exactly
+// one rule can be violated.
+//
+// Since v2.8.0
+func (c *Client) OnPanic(h ErrorHook) *Client {
+ c.panicHooks = append(c.panicHooks, h)
+ return c
+}
+
+// SetPreRequestHook method sets the given pre-request function into resty client.
+// It is called right before the request is fired.
+//
+// Note: Only one pre-request hook can be registered. Use `client.OnBeforeRequest` for multiple.
+func (c *Client) SetPreRequestHook(h PreRequestHook) *Client {
+ if c.preReqHook != nil {
+ c.log.Warnf("Overwriting an existing pre-request hook: %s", functionName(h))
+ }
+ c.preReqHook = h
+ return c
+}
+
+// SetDebug method enables the debug mode on Resty client. Client logs details of every request and response.
+// For `Request` it logs information such as HTTP verb, Relative URL path, Host, Headers, Body if it has one.
+// For `Response` it logs information such as Status, Response Time, Headers, Body if it has one.
+//
+// client.SetDebug(true)
+//
+// Also it can be enabled at request level for particular request, see `Request.SetDebug`.
+func (c *Client) SetDebug(d bool) *Client {
+ c.Debug = d
+ return c
+}
+
+// SetDebugBodyLimit sets the maximum size for which the response and request body will be logged in debug mode.
+//
+// client.SetDebugBodyLimit(1000000)
+func (c *Client) SetDebugBodyLimit(sl int64) *Client {
+ c.debugBodySizeLimit = sl
+ return c
+}
+
+// OnRequestLog method used to set request log callback into Resty. Registered callback gets
+// called before the resty actually logs the information.
+func (c *Client) OnRequestLog(rl RequestLogCallback) *Client {
+ if c.requestLog != nil {
+ c.log.Warnf("Overwriting an existing on-request-log callback from=%s to=%s",
+ functionName(c.requestLog), functionName(rl))
+ }
+ c.requestLog = rl
+ return c
+}
+
+// OnResponseLog method used to set response log callback into Resty. Registered callback gets
+// called before the resty actually logs the information.
+func (c *Client) OnResponseLog(rl ResponseLogCallback) *Client {
+ if c.responseLog != nil {
+ c.log.Warnf("Overwriting an existing on-response-log callback from=%s to=%s",
+ functionName(c.responseLog), functionName(rl))
+ }
+ c.responseLog = rl
+ return c
+}
+
+// SetDisableWarn method disables the warning message on Resty client.
+//
+// For Example: Resty warns the user when BasicAuth is used in non-TLS mode.
+//
+// client.SetDisableWarn(true)
+func (c *Client) SetDisableWarn(d bool) *Client {
+ c.DisableWarn = d
+ return c
+}
+
+// SetAllowGetMethodPayload method allows the GET method with payload on Resty client.
+//
+// For Example: Resty allows the user to send a request with a payload on the HTTP GET method.
+//
+// client.SetAllowGetMethodPayload(true)
+func (c *Client) SetAllowGetMethodPayload(a bool) *Client {
+ c.AllowGetMethodPayload = a
+ return c
+}
+
+// SetLogger method sets given writer for logging Resty request and response details.
+//
+// Compliant to interface `resty.Logger`.
+func (c *Client) SetLogger(l Logger) *Client {
+ c.log = l
+ return c
+}
+
+// SetContentLength method enables the HTTP header `Content-Length` value for every request.
+// By default Resty won't set `Content-Length`.
+//
+// client.SetContentLength(true)
+//
+// Also you have an option to enable for particular request. See `Request.SetContentLength`
+func (c *Client) SetContentLength(l bool) *Client {
+ c.setContentLength = l
+ return c
+}
+
+// SetTimeout method sets timeout for request raised from client.
+//
+// client.SetTimeout(time.Duration(1 * time.Minute))
+func (c *Client) SetTimeout(timeout time.Duration) *Client {
+ c.httpClient.Timeout = timeout
+ return c
+}
+
+// SetError method is to register the global or client common `Error` object into Resty.
+// It is used for automatic unmarshalling if response status code is greater than 399 and
+// content type either JSON or XML. Can be pointer or non-pointer.
+//
+// client.SetError(&Error{})
+// // OR
+// client.SetError(Error{})
+func (c *Client) SetError(err interface{}) *Client {
+ c.Error = typeOf(err)
+ return c
+}
+
+// SetRedirectPolicy method sets the client redirect policy. Resty provides ready to use
+// redirect policies. Wanna create one for yourself refer to `redirect.go`.
+//
+// client.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+//
+// // Need multiple redirect policies together
+// client.SetRedirectPolicy(FlexibleRedirectPolicy(20), DomainCheckRedirectPolicy("host1.com", "host2.net"))
+func (c *Client) SetRedirectPolicy(policies ...interface{}) *Client {
+ for _, p := range policies {
+ if _, ok := p.(RedirectPolicy); !ok {
+ c.log.Errorf("%v does not implement resty.RedirectPolicy (missing Apply method)",
+ functionName(p))
+ }
+ }
+
+ c.httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ for _, p := range policies {
+ if err := p.(RedirectPolicy).Apply(req, via); err != nil {
+ return err
+ }
+ }
+ return nil // looks good, go ahead
+ }
+
+ return c
+}
+
+// SetRetryCount method enables retry on the Resty client and allows you
+// to set the number of retries. Resty uses a backoff mechanism.
+func (c *Client) SetRetryCount(count int) *Client {
+ c.RetryCount = count
+ return c
+}
+
+// SetRetryWaitTime method sets default wait time to sleep before retrying
+// request.
+//
+// Default is 100 milliseconds.
+func (c *Client) SetRetryWaitTime(waitTime time.Duration) *Client {
+ c.RetryWaitTime = waitTime
+ return c
+}
+
+// SetRetryMaxWaitTime method sets max wait time to sleep before retrying
+// request.
+//
+// Default is 2 seconds.
+func (c *Client) SetRetryMaxWaitTime(maxWaitTime time.Duration) *Client {
+ c.RetryMaxWaitTime = maxWaitTime
+ return c
+}
+
+// SetRetryAfter sets callback to calculate wait time between retries.
+// Default (nil) implies exponential backoff with jitter
+func (c *Client) SetRetryAfter(callback RetryAfterFunc) *Client {
+ c.RetryAfter = callback
+ return c
+}
+
+// SetJSONMarshaler method sets the JSON marshaler function to marshal the request body.
+// By default, Resty uses `encoding/json` package to marshal the request body.
+//
+// Since v2.8.0
+func (c *Client) SetJSONMarshaler(marshaler func(v interface{}) ([]byte, error)) *Client {
+ c.JSONMarshal = marshaler
+ return c
+}
+
+// SetJSONUnmarshaler method sets the JSON unmarshaler function to unmarshal the response body.
+// By default, Resty uses `encoding/json` package to unmarshal the response body.
+//
+// Since v2.8.0
+func (c *Client) SetJSONUnmarshaler(unmarshaler func(data []byte, v interface{}) error) *Client {
+ c.JSONUnmarshal = unmarshaler
+ return c
+}
+
+// SetXMLMarshaler method sets the XML marshaler function to marshal the request body.
+// By default, Resty uses `encoding/xml` package to marshal the request body.
+//
+// Since v2.8.0
+func (c *Client) SetXMLMarshaler(marshaler func(v interface{}) ([]byte, error)) *Client {
+ c.XMLMarshal = marshaler
+ return c
+}
+
+// SetXMLUnmarshaler method sets the XML unmarshaler function to unmarshal the response body.
+// By default, Resty uses `encoding/xml` package to unmarshal the response body.
+//
+// Since v2.8.0
+func (c *Client) SetXMLUnmarshaler(unmarshaler func(data []byte, v interface{}) error) *Client {
+ c.XMLUnmarshal = unmarshaler
+ return c
+}
+
+// AddRetryCondition method adds a retry condition function to array of functions
+// that are checked to determine if the request is retried. The request will
+// retry if any of the functions return true and error is nil.
+//
+// Note: These retry conditions are applied on all Request made using this Client.
+// For Request specific retry conditions check *Request.AddRetryCondition
+func (c *Client) AddRetryCondition(condition RetryConditionFunc) *Client {
+ c.RetryConditions = append(c.RetryConditions, condition)
+ return c
+}
+
+// AddRetryAfterErrorCondition adds the basic condition of retrying after encountering
+// an error from the http response
+//
+// Since v2.6.0
+func (c *Client) AddRetryAfterErrorCondition() *Client {
+ c.AddRetryCondition(func(response *Response, err error) bool {
+ return response.IsError()
+ })
+ return c
+}
+
+// AddRetryHook adds a side-effecting retry hook to an array of hooks
+// that will be executed on each retry.
+//
+// Since v2.6.0
+func (c *Client) AddRetryHook(hook OnRetryFunc) *Client {
+ c.RetryHooks = append(c.RetryHooks, hook)
+ return c
+}
+
+// SetRetryResetReaders method enables the Resty client to seek the start of all
+// file readers given as multipart files, if the given object implements `io.ReadSeeker`.
+//
+// Since ...
+func (c *Client) SetRetryResetReaders(b bool) *Client {
+ c.RetryResetReaders = b
+ return c
+}
+
+// SetTLSClientConfig method sets TLSClientConfig for underlying client Transport.
+//
+// For Example:
+//
+// // One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+// client.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+//
+// // or One can disable security check (https)
+// client.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+//
+// Note: This method overwrites existing `TLSClientConfig`.
+func (c *Client) SetTLSClientConfig(config *tls.Config) *Client {
+ transport, err := c.Transport()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ transport.TLSClientConfig = config
+ return c
+}
+
+// SetProxy method sets the Proxy URL and Port for Resty client.
+//
+// client.SetProxy("http://proxyserver:8888")
+//
+// OR Without this `SetProxy` method, you could also set Proxy via environment variable.
+//
+// Refer to godoc `http.ProxyFromEnvironment`.
+func (c *Client) SetProxy(proxyURL string) *Client {
+ transport, err := c.Transport()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+
+ pURL, err := url.Parse(proxyURL)
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+
+ c.proxyURL = pURL
+ transport.Proxy = http.ProxyURL(c.proxyURL)
+ return c
+}
+
+// RemoveProxy method removes the proxy configuration from Resty client
+//
+// client.RemoveProxy()
+func (c *Client) RemoveProxy() *Client {
+ transport, err := c.Transport()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ c.proxyURL = nil
+ transport.Proxy = nil
+ return c
+}
+
+// SetCertificates method helps to set client certificates into Resty conveniently.
+func (c *Client) SetCertificates(certs ...tls.Certificate) *Client {
+ config, err := c.tlsConfig()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ config.Certificates = append(config.Certificates, certs...)
+ return c
+}
+
+// SetRootCertificate method helps to add one or more root certificates into Resty client
+//
+// client.SetRootCertificate("/path/to/root/pemFile.pem")
+func (c *Client) SetRootCertificate(pemFilePath string) *Client {
+ rootPemData, err := os.ReadFile(pemFilePath)
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+
+ config, err := c.tlsConfig()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ if config.RootCAs == nil {
+ config.RootCAs = x509.NewCertPool()
+ }
+
+ config.RootCAs.AppendCertsFromPEM(rootPemData)
+ return c
+}
+
+// SetRootCertificateFromString method helps to add one or more root certificates into Resty client
+//
+// client.SetRootCertificateFromString("pem file content")
+func (c *Client) SetRootCertificateFromString(pemContent string) *Client {
+ config, err := c.tlsConfig()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ if config.RootCAs == nil {
+ config.RootCAs = x509.NewCertPool()
+ }
+
+ config.RootCAs.AppendCertsFromPEM([]byte(pemContent))
+ return c
+}
+
+// SetOutputDirectory method sets the output directory for saving HTTP responses into files.
+// If the output directory does not exist then resty creates one. This setting is optional;
+// if you plan to use an absolute path in `Request.SetOutput`, the two can be used together.
+//
+// client.SetOutputDirectory("/save/http/response/here")
+func (c *Client) SetOutputDirectory(dirPath string) *Client {
+ c.outputDirectory = dirPath
+ return c
+}
+
+// SetRateLimiter sets an optional `RateLimiter`. If set the rate limiter will control
+// all requests made with this client.
+//
+// Since v2.9.0
+func (c *Client) SetRateLimiter(rl RateLimiter) *Client {
+ c.rateLimiter = rl
+ return c
+}
+
+// SetTransport method sets custom `*http.Transport` or any `http.RoundTripper`
+// compatible interface implementation in the resty client.
+//
+// Note:
+//
+// - If transport is not type of `*http.Transport` then you may not be able to
+// take advantage of some of the Resty client settings.
+//
+// - It overwrites the Resty client transport instance and it's configurations.
+//
+// transport := &http.Transport{
+// // something like Proxying to httptest.Server, etc...
+// Proxy: func(req *http.Request) (*url.URL, error) {
+// return url.Parse(server.URL)
+// },
+// }
+//
+// client.SetTransport(transport)
+func (c *Client) SetTransport(transport http.RoundTripper) *Client {
+ if transport != nil {
+ c.httpClient.Transport = transport
+ }
+ return c
+}
+
+// SetScheme method sets a custom scheme in the Resty client. It's a way to override the default.
+//
+// client.SetScheme("http")
+func (c *Client) SetScheme(scheme string) *Client {
+ if !IsStringEmpty(scheme) {
+ c.scheme = strings.TrimSpace(scheme)
+ }
+ return c
+}
+
+// SetCloseConnection method sets variable `Close` in http request struct with the given
+// value. More info: https://golang.org/src/net/http/request.go
+func (c *Client) SetCloseConnection(close bool) *Client {
+ c.closeConnection = close
+ return c
+}
+
+// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically.
+// Resty exposes the raw response body as `io.ReadCloser`. Also do not forget to close the body,
+// otherwise you might get into connection leaks, no connection reuse.
+//
+// Note: Response middlewares are not applicable, if you use this option. Basically you have
+// taken over the control of response parsing from `Resty`.
+func (c *Client) SetDoNotParseResponse(parse bool) *Client {
+ c.notParseResponse = parse
+ return c
+}
+
+// SetPathParam method sets single URL path key-value pair in the
+// Resty client instance.
+//
+// client.SetPathParam("userId", "sample@sample.com")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/sample@sample.com/details
+//
+// It replaces the value of the key while composing the request URL.
+// The value will be escaped using `url.PathEscape` function.
+//
+// Also it can be overridden at request level Path Params options,
+// see `Request.SetPathParam` or `Request.SetPathParams`.
+func (c *Client) SetPathParam(param, value string) *Client {
+ c.PathParams[param] = value
+ return c
+}
+
+// SetPathParams method sets multiple URL path key-value pairs at one go in the
+// Resty client instance.
+//
+// client.SetPathParams(map[string]string{
+// "userId": "sample@sample.com",
+// "subAccountId": "100002",
+// "path": "groups/developers",
+// })
+//
+// Result:
+// URL - /v1/users/{userId}/{subAccountId}/{path}/details
+// Composed URL - /v1/users/sample@sample.com/100002/groups%2Fdevelopers/details
+//
+// It replaces the value of the key while composing the request URL.
+// The values will be escaped using `url.PathEscape` function.
+//
+// Also it can be overridden at request level Path Params options,
+// see `Request.SetPathParam` or `Request.SetPathParams`.
+func (c *Client) SetPathParams(params map[string]string) *Client {
+ for p, v := range params {
+ c.SetPathParam(p, v)
+ }
+ return c
+}
+
+// SetRawPathParam method sets single URL path key-value pair in the
+// Resty client instance.
+//
+// client.SetPathParam("userId", "sample@sample.com")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/sample@sample.com/details
+//
+// client.SetPathParam("path", "groups/developers")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/groups%2Fdevelopers/details
+//
+// It replaces the value of the key while composing the request URL.
+// The value will be used as it is and will not be escaped.
+//
+// Also it can be overridden at request level Path Params options,
+// see `Request.SetPathParam` or `Request.SetPathParams`.
+//
+// Since v2.8.0
+func (c *Client) SetRawPathParam(param, value string) *Client {
+ c.RawPathParams[param] = value
+ return c
+}
+
+// SetRawPathParams method sets multiple URL path key-value pairs at one go in the
+// Resty client instance.
+//
+// client.SetRawPathParams(map[string]string{
+// "userId": "sample@sample.com",
+// "subAccountId": "100002",
+// "path": "groups/developers",
+// })
+//
+// Result:
+// URL - /v1/users/{userId}/{subAccountId}/{path}/details
+// Composed URL - /v1/users/sample@sample.com/100002/groups/developers/details
+//
+// It replaces the value of the key while composing the request URL.
+// The values will be used as they are and will not be escaped.
+//
+// Also, it can be overridden at the request level via Path Params options;
+// see `Request.SetPathParam` or `Request.SetPathParams`.
+//
+// Since v2.8.0
+func (c *Client) SetRawPathParams(params map[string]string) *Client {
+ for p, v := range params {
+ c.SetRawPathParam(p, v)
+ }
+ return c
+}
+
+// SetJSONEscapeHTML method enables or disables HTML escaping on JSON marshal.
+//
+// Note: This option is only applicable to the standard JSON marshaller.
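+//
+// For example, to disable HTML escaping:
+//
+// client.SetJSONEscapeHTML(false)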
+func (c *Client) SetJSONEscapeHTML(b bool) *Client {
+ c.jsonEscapeHTML = b
+ return c
+}
+
+// EnableTrace method enables the Resty client trace for the requests fired from
+// the client using `httptrace.ClientTrace` and provides insights.
+//
+// client := resty.New().EnableTrace()
+//
+// resp, err := client.R().Get("https://httpbin.org/get")
+// fmt.Println("Error:", err)
+// fmt.Println("Trace Info:", resp.Request.TraceInfo())
+//
+// Also, `Request.EnableTrace` is available to get trace info for a single request.
+//
+// Since v2.0.0
+func (c *Client) EnableTrace() *Client {
+ c.trace = true
+ return c
+}
+
+// DisableTrace method disables the Resty client trace. Refer to `Client.EnableTrace`.
+//
+// Since v2.0.0
+func (c *Client) DisableTrace() *Client {
+ c.trace = false
+ return c
+}
+
+// IsProxySet method returns true if a proxy is set on the resty client, otherwise
+// false. By default the proxy is taken from the environment, refer to `http.ProxyFromEnvironment`.
+func (c *Client) IsProxySet() bool {
+ return c.proxyURL != nil
+}
+
+// GetClient method returns the current `http.Client` used by the resty client.
+func (c *Client) GetClient() *http.Client {
+ return c.httpClient
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Client Unexported methods
+//_______________________________________________________________________
+
+// execute method executes the given `Request` object and returns the response
+// or an error.
+func (c *Client) execute(req *Request) (*Response, error) {
+ // Lock the user-defined pre-request hooks.
+ c.udBeforeRequestLock.RLock()
+ defer c.udBeforeRequestLock.RUnlock()
+
+ // Lock the post-request hooks.
+ c.afterResponseLock.RLock()
+ defer c.afterResponseLock.RUnlock()
+
+ // Apply Request middleware
+ var err error
+
+ // user defined on before request methods
+ // to modify the *resty.Request object
+ for _, f := range c.udBeforeRequest {
+ if err = f(c, req); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+ }
+
+ // If there is a rate limiter set for this client, the Execute call
+ // will return an error if the rate limit is exceeded.
+ if req.client.rateLimiter != nil {
+ if !req.client.rateLimiter.Allow() {
+ return nil, wrapNoRetryErr(ErrRateLimitExceeded)
+ }
+ }
+
+ // resty middlewares
+ for _, f := range c.beforeRequest {
+ if err = f(c, req); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+ }
+
+ if hostHeader := req.Header.Get("Host"); hostHeader != "" {
+ req.RawRequest.Host = hostHeader
+ }
+
+ // call pre-request if defined
+ if c.preReqHook != nil {
+ if err = c.preReqHook(c, req.RawRequest); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+ }
+
+ if err = requestLogger(c, req); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+
+ req.RawRequest.Body = newRequestBodyReleaser(req.RawRequest.Body, req.bodyBuf)
+
+ req.Time = time.Now()
+ resp, err := c.httpClient.Do(req.RawRequest)
+
+ response := &Response{
+ Request: req,
+ RawResponse: resp,
+ }
+
+ if err != nil || req.notParseResponse || c.notParseResponse {
+ response.setReceivedAt()
+ return response, err
+ }
+
+ if !req.isSaveResponse {
+ defer closeq(resp.Body)
+ body := resp.Body
+
+ // GitHub #142 & #187
+ if strings.EqualFold(resp.Header.Get(hdrContentEncodingKey), "gzip") && resp.ContentLength != 0 {
+ if _, ok := body.(*gzip.Reader); !ok {
+ body, err = gzip.NewReader(body)
+ if err != nil {
+ response.setReceivedAt()
+ return response, err
+ }
+ defer closeq(body)
+ }
+ }
+
+ if response.body, err = io.ReadAll(body); err != nil {
+ response.setReceivedAt()
+ return response, err
+ }
+
+ response.size = int64(len(response.body))
+ }
+
+ response.setReceivedAt() // after we read the body
+
+ // Apply Response middleware
+ for _, f := range c.afterResponse {
+ if err = f(c, response); err != nil {
+ break
+ }
+ }
+
+ return response, wrapNoRetryErr(err)
+}
+
+// tlsConfig returns the TLS client config; if one does not exist, it is created
+func (c *Client) tlsConfig() (*tls.Config, error) {
+ transport, err := c.Transport()
+ if err != nil {
+ return nil, err
+ }
+ if transport.TLSClientConfig == nil {
+ transport.TLSClientConfig = &tls.Config{}
+ }
+ return transport.TLSClientConfig, nil
+}
+
+// Transport method returns the `*http.Transport` currently in use, or an error
+// in case the currently used `transport` is not a `*http.Transport` instance.
+//
+// Since v2.8.0 this is an exported method.
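+//
+// A minimal sketch of tweaking the underlying transport (the field chosen is illustrative):
+//
+// if transport, err := client.Transport(); err == nil {
+// transport.MaxIdleConnsPerHost = 10
+// }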
+func (c *Client) Transport() (*http.Transport, error) {
+ if transport, ok := c.httpClient.Transport.(*http.Transport); ok {
+ return transport, nil
+ }
+ return nil, errors.New("current transport is not an *http.Transport instance")
+}
+
+// just an internal helper method
+func (c *Client) outputLogTo(w io.Writer) *Client {
+ c.log.(*logger).l.SetOutput(w)
+ return c
+}
+
+// ResponseError is a wrapper for including the server response with an error.
+// Neither the err nor the response should be nil.
+type ResponseError struct {
+ Response *Response
+ Err error
+}
+
+func (e *ResponseError) Error() string {
+ return e.Err.Error()
+}
+
+func (e *ResponseError) Unwrap() error {
+ return e.Err
+}
+
+// Helper to run errorHooks hooks.
+// It wraps the error in a ResponseError if the resp is not nil
+// so hooks can access it.
+func (c *Client) onErrorHooks(req *Request, resp *Response, err error) {
+ if err != nil {
+ if resp != nil { // wrap with ResponseError
+ err = &ResponseError{Response: resp, Err: err}
+ }
+ for _, h := range c.errorHooks {
+ h(req, err)
+ }
+ } else {
+ for _, h := range c.successHooks {
+ h(c, resp)
+ }
+ }
+}
+
+// Helper to run panicHooks hooks.
+func (c *Client) onPanicHooks(req *Request, err error) {
+ for _, h := range c.panicHooks {
+ h(req, err)
+ }
+}
+
+// Helper to run invalidHooks hooks.
+func (c *Client) onInvalidHooks(req *Request, err error) {
+ for _, h := range c.invalidHooks {
+ h(req, err)
+ }
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// File struct and its methods
+//_______________________________________________________________________
+
+// File struct represents file information for a multipart request
+type File struct {
+ Name string
+ ParamName string
+ io.Reader
+}
+
+// String returns string value of current file details
+func (f *File) String() string {
+ return fmt.Sprintf("ParamName: %v; FileName: %v", f.ParamName, f.Name)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// MultipartField struct
+//_______________________________________________________________________
+
+// MultipartField struct represents a custom data part for a multipart request
+type MultipartField struct {
+ Param string
+ FileName string
+ ContentType string
+ io.Reader
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Unexported package methods
+//_______________________________________________________________________
+
+func createClient(hc *http.Client) *Client {
+ if hc.Transport == nil {
+ hc.Transport = createTransport(nil)
+ }
+
+ c := &Client{ // not setting lang default values
+ QueryParam: url.Values{},
+ FormData: url.Values{},
+ Header: http.Header{},
+ Cookies: make([]*http.Cookie, 0),
+ RetryWaitTime: defaultWaitTime,
+ RetryMaxWaitTime: defaultMaxWaitTime,
+ PathParams: make(map[string]string),
+ RawPathParams: make(map[string]string),
+ JSONMarshal: json.Marshal,
+ JSONUnmarshal: json.Unmarshal,
+ XMLMarshal: xml.Marshal,
+ XMLUnmarshal: xml.Unmarshal,
+ HeaderAuthorizationKey: http.CanonicalHeaderKey("Authorization"),
+
+ jsonEscapeHTML: true,
+ httpClient: hc,
+ debugBodySizeLimit: math.MaxInt32,
+ }
+
+ // Logger
+ c.SetLogger(createLogger())
+
+ // default before request middlewares
+ c.beforeRequest = []RequestMiddleware{
+ parseRequestURL,
+ parseRequestHeader,
+ parseRequestBody,
+ createHTTPRequest,
+ addCredentials,
+ }
+
+ // user defined request middlewares
+ c.udBeforeRequest = []RequestMiddleware{}
+
+ // default after response middlewares
+ c.afterResponse = []ResponseMiddleware{
+ responseLogger,
+ parseResponseBody,
+ saveResponseIntoFile,
+ }
+
+ return c
+}
diff --git a/vendor/github.com/go-resty/resty/v2/digest.go b/vendor/github.com/go-resty/resty/v2/digest.go
new file mode 100644
index 0000000..ba099ad
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/digest.go
@@ -0,0 +1,281 @@
+// Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com)
+// 2023 Segev Dagan (https://github.com/segevda)
+// All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/sha512"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "net/http"
+ "strings"
+)
+
+var (
+ ErrDigestBadChallenge = errors.New("digest: challenge is bad")
+ ErrDigestCharset = errors.New("digest: unsupported charset")
+ ErrDigestAlgNotSupported = errors.New("digest: algorithm is not supported")
+ ErrDigestQopNotSupported = errors.New("digest: no supported qop in list")
+ ErrDigestNoQop = errors.New("digest: qop must be specified")
+)
+
+var hashFuncs = map[string]func() hash.Hash{
+ "": md5.New,
+ "MD5": md5.New,
+ "MD5-sess": md5.New,
+ "SHA-256": sha256.New,
+ "SHA-256-sess": sha256.New,
+ "SHA-512-256": sha512.New,
+ "SHA-512-256-sess": sha512.New,
+}
+
+type digestCredentials struct {
+ username, password string
+}
+
+type digestTransport struct {
+ digestCredentials
+ transport http.RoundTripper
+}
+
+func (dt *digestTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ // Copy the request, so we don't modify the input.
+ req2 := new(http.Request)
+ *req2 = *req
+ req2.Header = make(http.Header)
+ for k, s := range req.Header {
+ req2.Header[k] = s
+ }
+
+ // Make a request to get the 401 that contains the challenge.
+ resp, err := dt.transport.RoundTrip(req)
+ if err != nil || resp.StatusCode != http.StatusUnauthorized {
+ return resp, err
+ }
+ chal := resp.Header.Get(hdrWwwAuthenticateKey)
+ if chal == "" {
+ return resp, ErrDigestBadChallenge
+ }
+
+ c, err := parseChallenge(chal)
+ if err != nil {
+ return resp, err
+ }
+
+ // Form credentials based on the challenge
+ cr := dt.newCredentials(req2, c)
+ auth, err := cr.authorize()
+ if err != nil {
+ return resp, err
+ }
+ err = resp.Body.Close()
+ if err != nil {
+ return nil, err
+ }
+
+ // Make authenticated request
+ req2.Header.Set(hdrAuthorizationKey, auth)
+ return dt.transport.RoundTrip(req2)
+}
+
+func (dt *digestTransport) newCredentials(req *http.Request, c *challenge) *credentials {
+ return &credentials{
+ username: dt.username,
+ userhash: c.userhash,
+ realm: c.realm,
+ nonce: c.nonce,
+ digestURI: req.URL.RequestURI(),
+ algorithm: c.algorithm,
+ sessionAlg: strings.HasSuffix(c.algorithm, "-sess"),
+ opaque: c.opaque,
+ messageQop: c.qop,
+ nc: 0,
+ method: req.Method,
+ password: dt.password,
+ }
+}
+
+type challenge struct {
+ realm string
+ domain string
+ nonce string
+ opaque string
+ stale string
+ algorithm string
+ qop string
+ userhash string
+}
+
+func parseChallenge(input string) (*challenge, error) {
+ const ws = " \n\r\t"
+ const qs = `"`
+ s := strings.Trim(input, ws)
+ if !strings.HasPrefix(s, "Digest ") {
+ return nil, ErrDigestBadChallenge
+ }
+ s = strings.Trim(s[7:], ws)
+ sl := strings.Split(s, ", ")
+ c := &challenge{}
+ var r []string
+ for i := range sl {
+ r = strings.SplitN(sl[i], "=", 2)
+ if len(r) != 2 {
+ return nil, ErrDigestBadChallenge
+ }
+ switch r[0] {
+ case "realm":
+ c.realm = strings.Trim(r[1], qs)
+ case "domain":
+ c.domain = strings.Trim(r[1], qs)
+ case "nonce":
+ c.nonce = strings.Trim(r[1], qs)
+ case "opaque":
+ c.opaque = strings.Trim(r[1], qs)
+ case "stale":
+ c.stale = r[1]
+ case "algorithm":
+ c.algorithm = r[1]
+ case "qop":
+ c.qop = strings.Trim(r[1], qs)
+ case "charset":
+ if strings.ToUpper(strings.Trim(r[1], qs)) != "UTF-8" {
+ return nil, ErrDigestCharset
+ }
+ case "userhash":
+ c.userhash = strings.Trim(r[1], qs)
+ default:
+ return nil, ErrDigestBadChallenge
+ }
+ }
+ return c, nil
+}
+
+type credentials struct {
+ username string
+ userhash string
+ realm string
+ nonce string
+ digestURI string
+ algorithm string
+ sessionAlg bool
+ cNonce string
+ opaque string
+ messageQop string
+ nc int
+ method string
+ password string
+}
+
+func (c *credentials) authorize() (string, error) {
+ if _, ok := hashFuncs[c.algorithm]; !ok {
+ return "", ErrDigestAlgNotSupported
+ }
+
+ if err := c.validateQop(); err != nil {
+ return "", err
+ }
+
+ resp, err := c.resp()
+ if err != nil {
+ return "", err
+ }
+
+ sl := make([]string, 0, 10)
+ if c.userhash == "true" {
+ // RFC 7616 3.4.4
+ c.username = c.h(fmt.Sprintf("%s:%s", c.username, c.realm))
+ sl = append(sl, fmt.Sprintf(`userhash=%s`, c.userhash))
+ }
+ sl = append(sl, fmt.Sprintf(`username="%s"`, c.username))
+ sl = append(sl, fmt.Sprintf(`realm="%s"`, c.realm))
+ sl = append(sl, fmt.Sprintf(`nonce="%s"`, c.nonce))
+ sl = append(sl, fmt.Sprintf(`uri="%s"`, c.digestURI))
+ sl = append(sl, fmt.Sprintf(`response="%s"`, resp))
+ sl = append(sl, fmt.Sprintf(`algorithm=%s`, c.algorithm))
+ if c.opaque != "" {
+ sl = append(sl, fmt.Sprintf(`opaque="%s"`, c.opaque))
+ }
+ if c.messageQop != "" {
+ sl = append(sl, fmt.Sprintf("qop=%s", c.messageQop))
+ sl = append(sl, fmt.Sprintf("nc=%08x", c.nc))
+ sl = append(sl, fmt.Sprintf(`cnonce="%s"`, c.cNonce))
+ }
+
+ return fmt.Sprintf("Digest %s", strings.Join(sl, ", ")), nil
+}
+
+func (c *credentials) validateQop() error {
+// Currently only the "auth" quality of protection is supported. TODO: add auth-int support.
+// NOTE: cURL supports the auth-int qop for requests other than POST and PUT (i.e. w/o body) by hashing an empty string;
+// is this applicable for resty? see: https://github.com/curl/curl/blob/307b7543ea1e73ab04e062bdbe4b5bb409eaba3a/lib/vauth/digest.c#L774
+ if c.messageQop == "" {
+ return ErrDigestNoQop
+ }
+ possibleQops := strings.Split(c.messageQop, ", ")
+ var authSupport bool
+ for _, qop := range possibleQops {
+ if qop == "auth" {
+ authSupport = true
+ break
+ }
+ }
+ if !authSupport {
+ return ErrDigestQopNotSupported
+ }
+
+ c.messageQop = "auth"
+
+ return nil
+}
+
+func (c *credentials) h(data string) string {
+ hfCtor := hashFuncs[c.algorithm]
+ hf := hfCtor()
+ _, _ = hf.Write([]byte(data)) // Hash.Write never returns an error
+ return fmt.Sprintf("%x", hf.Sum(nil))
+}
+
+func (c *credentials) resp() (string, error) {
+ c.nc++
+
+ b := make([]byte, 16)
+ _, err := io.ReadFull(rand.Reader, b)
+ if err != nil {
+ return "", err
+ }
+ c.cNonce = fmt.Sprintf("%x", b)[:32]
+
+ ha1 := c.ha1()
+ ha2 := c.ha2()
+
+ return c.kd(ha1, fmt.Sprintf("%s:%08x:%s:%s:%s",
+ c.nonce, c.nc, c.cNonce, c.messageQop, ha2)), nil
+}
+
+func (c *credentials) kd(secret, data string) string {
+ return c.h(fmt.Sprintf("%s:%s", secret, data))
+}
+
+// RFC 7616 3.4.2
+func (c *credentials) ha1() string {
+ ret := c.h(fmt.Sprintf("%s:%s:%s", c.username, c.realm, c.password))
+ if c.sessionAlg {
+ return c.h(fmt.Sprintf("%s:%s:%s", ret, c.nonce, c.cNonce))
+ }
+
+ return ret
+}
+
+// RFC 7616 3.4.3
+func (c *credentials) ha2() string {
+ // currently no auth-int support
+ return c.h(fmt.Sprintf("%s:%s", c.method, c.digestURI))
+}
diff --git a/vendor/github.com/go-resty/resty/v2/middleware.go b/vendor/github.com/go-resty/resty/v2/middleware.go
new file mode 100644
index 0000000..333be41
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/middleware.go
@@ -0,0 +1,567 @@
+// Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "time"
+)
+
+const debugRequestLogKey = "__restyDebugRequestLog"
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request Middleware(s)
+//_______________________________________________________________________
+
+func parseRequestURL(c *Client, r *Request) error {
+ // GitHub #103 Path Params
+ if len(r.PathParams) > 0 {
+ for p, v := range r.PathParams {
+ r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1)
+ }
+ }
+ if len(c.PathParams) > 0 {
+ for p, v := range c.PathParams {
+ r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1)
+ }
+ }
+
+ // GitHub #663 Raw Path Params
+ if len(r.RawPathParams) > 0 {
+ for p, v := range r.RawPathParams {
+ r.URL = strings.Replace(r.URL, "{"+p+"}", v, -1)
+ }
+ }
+ if len(c.RawPathParams) > 0 {
+ for p, v := range c.RawPathParams {
+ r.URL = strings.Replace(r.URL, "{"+p+"}", v, -1)
+ }
+ }
+
+ // Parsing request URL
+ reqURL, err := url.Parse(r.URL)
+ if err != nil {
+ return err
+ }
+
+// If Request.URL is a relative path then c.HostURL is prepended to
+// the request URL, otherwise Request.URL is used as-is
+ if !reqURL.IsAbs() {
+ r.URL = reqURL.String()
+ if len(r.URL) > 0 && r.URL[0] != '/' {
+ r.URL = "/" + r.URL
+ }
+
+ // TODO: change to use c.BaseURL only in v3.0.0
+ baseURL := c.BaseURL
+ if len(baseURL) == 0 {
+ baseURL = c.HostURL
+ }
+ reqURL, err = url.Parse(baseURL + r.URL)
+ if err != nil {
+ return err
+ }
+ }
+
+ // GH #407 && #318
+ if reqURL.Scheme == "" && len(c.scheme) > 0 {
+ reqURL.Scheme = c.scheme
+ }
+
+ // Adding Query Param
+ query := make(url.Values)
+ for k, v := range c.QueryParam {
+ for _, iv := range v {
+ query.Add(k, iv)
+ }
+ }
+
+ for k, v := range r.QueryParam {
+ // remove query param from client level by key
+// since overrides happen for that key in the request
+ query.Del(k)
+
+ for _, iv := range v {
+ query.Add(k, iv)
+ }
+ }
+
+ // GitHub #123 Preserve query string order partially.
+// This is not feasible in the `SetQuery*` resty methods, because the
+// standard package `url.Values.Encode()` sorts the query params
+// alphabetically
+ if len(query) > 0 {
+ if IsStringEmpty(reqURL.RawQuery) {
+ reqURL.RawQuery = query.Encode()
+ } else {
+ reqURL.RawQuery = reqURL.RawQuery + "&" + query.Encode()
+ }
+ }
+
+ r.URL = reqURL.String()
+
+ return nil
+}
+
+func parseRequestHeader(c *Client, r *Request) error {
+ for k, v := range c.Header {
+ if _, ok := r.Header[k]; ok {
+ continue
+ }
+ r.Header[k] = v[:]
+ }
+
+ if IsStringEmpty(r.Header.Get(hdrUserAgentKey)) {
+ r.Header.Set(hdrUserAgentKey, hdrUserAgentValue)
+ }
+
+ if ct := r.Header.Get(hdrContentTypeKey); IsStringEmpty(r.Header.Get(hdrAcceptKey)) && !IsStringEmpty(ct) && (IsJSONType(ct) || IsXMLType(ct)) {
+ r.Header.Set(hdrAcceptKey, r.Header.Get(hdrContentTypeKey))
+ }
+
+ return nil
+}
+
+func parseRequestBody(c *Client, r *Request) (err error) {
+ if isPayloadSupported(r.Method, c.AllowGetMethodPayload) {
+ // Handling Multipart
+ if r.isMultiPart {
+ if err = handleMultipart(c, r); err != nil {
+ return
+ }
+
+ goto CL
+ }
+
+ // Handling Form Data
+ if len(c.FormData) > 0 || len(r.FormData) > 0 {
+ handleFormData(c, r)
+
+ goto CL
+ }
+
+ // Handling Request body
+ if r.Body != nil {
+ handleContentType(c, r)
+
+ if err = handleRequestBody(c, r); err != nil {
+ return
+ }
+ }
+ }
+
+CL:
+ // by default resty won't set content length, you can if you want to :)
+ if c.setContentLength || r.setContentLength {
+ if r.bodyBuf == nil {
+ r.Header.Set(hdrContentLengthKey, "0")
+ } else {
+ r.Header.Set(hdrContentLengthKey, fmt.Sprintf("%d", r.bodyBuf.Len()))
+ }
+ }
+
+ return
+}
+
+func createHTTPRequest(c *Client, r *Request) (err error) {
+ if r.bodyBuf == nil {
+ if reader, ok := r.Body.(io.Reader); ok && isPayloadSupported(r.Method, c.AllowGetMethodPayload) {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, reader)
+ } else if c.setContentLength || r.setContentLength {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, http.NoBody)
+ } else {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, nil)
+ }
+ } else {
+ // fix data race: must deep copy.
+ bodyBuf := bytes.NewBuffer(append([]byte{}, r.bodyBuf.Bytes()...))
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, bodyBuf)
+ }
+
+ if err != nil {
+ return
+ }
+
+ // Assign close connection option
+ r.RawRequest.Close = c.closeConnection
+
+ // Add headers into http request
+ r.RawRequest.Header = r.Header
+
+ // Add cookies from client instance into http request
+ for _, cookie := range c.Cookies {
+ r.RawRequest.AddCookie(cookie)
+ }
+
+ // Add cookies from request instance into http request
+ for _, cookie := range r.Cookies {
+ r.RawRequest.AddCookie(cookie)
+ }
+
+ // Enable trace
+ if c.trace || r.trace {
+ r.clientTrace = &clientTrace{}
+ r.ctx = r.clientTrace.createContext(r.Context())
+ }
+
+ // Use context if it was specified
+ if r.ctx != nil {
+ r.RawRequest = r.RawRequest.WithContext(r.ctx)
+ }
+
+ bodyCopy, err := getBodyCopy(r)
+ if err != nil {
+ return err
+ }
+
+ // assign get body func for the underlying raw request instance
+ r.RawRequest.GetBody = func() (io.ReadCloser, error) {
+ if bodyCopy != nil {
+ return io.NopCloser(bytes.NewReader(bodyCopy.Bytes())), nil
+ }
+ return nil, nil
+ }
+
+ return
+}
+
+func addCredentials(c *Client, r *Request) error {
+ var isBasicAuth bool
+ // Basic Auth
+ if r.UserInfo != nil { // takes precedence
+ r.RawRequest.SetBasicAuth(r.UserInfo.Username, r.UserInfo.Password)
+ isBasicAuth = true
+ } else if c.UserInfo != nil {
+ r.RawRequest.SetBasicAuth(c.UserInfo.Username, c.UserInfo.Password)
+ isBasicAuth = true
+ }
+
+ if !c.DisableWarn {
+ if isBasicAuth && !strings.HasPrefix(r.URL, "https") {
+ r.log.Warnf("Using Basic Auth in HTTP mode is not secure, use HTTPS")
+ }
+ }
+
+ // Set the Authorization Header Scheme
+ var authScheme string
+ if !IsStringEmpty(r.AuthScheme) {
+ authScheme = r.AuthScheme
+ } else if !IsStringEmpty(c.AuthScheme) {
+ authScheme = c.AuthScheme
+ } else {
+ authScheme = "Bearer"
+ }
+
+ // Build the Token Auth header
+ if !IsStringEmpty(r.Token) { // takes precedence
+ r.RawRequest.Header.Set(c.HeaderAuthorizationKey, authScheme+" "+r.Token)
+ } else if !IsStringEmpty(c.Token) {
+ r.RawRequest.Header.Set(c.HeaderAuthorizationKey, authScheme+" "+c.Token)
+ }
+
+ return nil
+}
+
+func requestLogger(c *Client, r *Request) error {
+ if r.Debug {
+ rr := r.RawRequest
+ rl := &RequestLog{Header: copyHeaders(rr.Header), Body: r.fmtBodyString(c.debugBodySizeLimit)}
+ if c.requestLog != nil {
+ if err := c.requestLog(rl); err != nil {
+ return err
+ }
+ }
+ // fmt.Sprintf("COOKIES:\n%s\n", composeCookies(c.GetClient().Jar, *rr.URL)) +
+
+ reqLog := "\n==============================================================================\n" +
+ "~~~ REQUEST ~~~\n" +
+ fmt.Sprintf("%s %s %s\n", r.Method, rr.URL.RequestURI(), rr.Proto) +
+ fmt.Sprintf("HOST : %s\n", rr.URL.Host) +
+ fmt.Sprintf("HEADERS:\n%s\n", composeHeaders(c, r, rl.Header)) +
+ fmt.Sprintf("BODY :\n%v\n", rl.Body) +
+ "------------------------------------------------------------------------------\n"
+
+ r.initValuesMap()
+ r.values[debugRequestLogKey] = reqLog
+ }
+
+ return nil
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response Middleware(s)
+//_______________________________________________________________________
+
+func responseLogger(c *Client, res *Response) error {
+ if res.Request.Debug {
+ rl := &ResponseLog{Header: copyHeaders(res.Header()), Body: res.fmtBodyString(c.debugBodySizeLimit)}
+ if c.responseLog != nil {
+ if err := c.responseLog(rl); err != nil {
+ return err
+ }
+ }
+
+ debugLog := res.Request.values[debugRequestLogKey].(string)
+ debugLog += "~~~ RESPONSE ~~~\n" +
+ fmt.Sprintf("STATUS : %s\n", res.Status()) +
+ fmt.Sprintf("PROTO : %s\n", res.RawResponse.Proto) +
+ fmt.Sprintf("RECEIVED AT : %v\n", res.ReceivedAt().Format(time.RFC3339Nano)) +
+ fmt.Sprintf("TIME DURATION: %v\n", res.Time()) +
+ "HEADERS :\n" +
+ composeHeaders(c, res.Request, rl.Header) + "\n"
+ if res.Request.isSaveResponse {
+ debugLog += "BODY :\n***** RESPONSE WRITTEN INTO FILE *****\n"
+ } else {
+ debugLog += fmt.Sprintf("BODY :\n%v\n", rl.Body)
+ }
+ debugLog += "==============================================================================\n"
+
+ res.Request.log.Debugf("%s", debugLog)
+ }
+
+ return nil
+}
+
+func parseResponseBody(c *Client, res *Response) (err error) {
+ if res.StatusCode() == http.StatusNoContent {
+ res.Request.Error = nil
+ return
+ }
+ // Handles only JSON or XML content type
+ ct := firstNonEmpty(res.Request.forceContentType, res.Header().Get(hdrContentTypeKey), res.Request.fallbackContentType)
+ if IsJSONType(ct) || IsXMLType(ct) {
+ // HTTP status code > 199 and < 300, considered as Result
+ if res.IsSuccess() {
+ res.Request.Error = nil
+ if res.Request.Result != nil {
+ err = Unmarshalc(c, ct, res.body, res.Request.Result)
+ return
+ }
+ }
+
+ // HTTP status code > 399, considered as Error
+ if res.IsError() {
+ // global error interface
+ if res.Request.Error == nil && c.Error != nil {
+ res.Request.Error = reflect.New(c.Error).Interface()
+ }
+
+ if res.Request.Error != nil {
+ unmarshalErr := Unmarshalc(c, ct, res.body, res.Request.Error)
+ if unmarshalErr != nil {
+ c.log.Warnf("Cannot unmarshal response body: %s", unmarshalErr)
+ }
+ }
+ }
+ }
+
+ return
+}
+
+func handleMultipart(c *Client, r *Request) (err error) {
+ r.bodyBuf = acquireBuffer()
+ w := multipart.NewWriter(r.bodyBuf)
+
+ for k, v := range c.FormData {
+ for _, iv := range v {
+ if err = w.WriteField(k, iv); err != nil {
+ return err
+ }
+ }
+ }
+
+ for k, v := range r.FormData {
+ for _, iv := range v {
+ if strings.HasPrefix(k, "@") { // file
+ err = addFile(w, k[1:], iv)
+ if err != nil {
+ return
+ }
+ } else { // form value
+ if err = w.WriteField(k, iv); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // #21 - adding io.Reader support
+ if len(r.multipartFiles) > 0 {
+ for _, f := range r.multipartFiles {
+ err = addFileReader(w, f)
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ // GitHub #130 adding multipart field support with content type
+ if len(r.multipartFields) > 0 {
+ for _, mf := range r.multipartFields {
+ if err = addMultipartFormField(w, mf); err != nil {
+ return
+ }
+ }
+ }
+
+ r.Header.Set(hdrContentTypeKey, w.FormDataContentType())
+ err = w.Close()
+
+ return
+}
+
+func handleFormData(c *Client, r *Request) {
+ formData := url.Values{}
+
+ for k, v := range c.FormData {
+ for _, iv := range v {
+ formData.Add(k, iv)
+ }
+ }
+
+ for k, v := range r.FormData {
+ // remove form data field from client level by key
+// since overrides happen for that key in the request
+ formData.Del(k)
+
+ for _, iv := range v {
+ formData.Add(k, iv)
+ }
+ }
+
+ r.bodyBuf = bytes.NewBuffer([]byte(formData.Encode()))
+ r.Header.Set(hdrContentTypeKey, formContentType)
+ r.isFormData = true
+}
+
+func handleContentType(c *Client, r *Request) {
+ contentType := r.Header.Get(hdrContentTypeKey)
+ if IsStringEmpty(contentType) {
+ contentType = DetectContentType(r.Body)
+ r.Header.Set(hdrContentTypeKey, contentType)
+ }
+}
+
+func handleRequestBody(c *Client, r *Request) (err error) {
+ var bodyBytes []byte
+ contentType := r.Header.Get(hdrContentTypeKey)
+ kind := kindOf(r.Body)
+ r.bodyBuf = nil
+
+ if reader, ok := r.Body.(io.Reader); ok {
+ if c.setContentLength || r.setContentLength { // keep backward compatibility
+ r.bodyBuf = acquireBuffer()
+ _, err = r.bodyBuf.ReadFrom(reader)
+ r.Body = nil
+ } else {
+// Otherwise, bufferless processing is used for the `io.Reader`.
+ return
+ }
+ } else if b, ok := r.Body.([]byte); ok {
+ bodyBytes = b
+ } else if s, ok := r.Body.(string); ok {
+ bodyBytes = []byte(s)
+ } else if IsJSONType(contentType) &&
+ (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice) {
+ r.bodyBuf, err = jsonMarshal(c, r, r.Body)
+ if err != nil {
+ return
+ }
+ } else if IsXMLType(contentType) && (kind == reflect.Struct) {
+ bodyBytes, err = c.XMLMarshal(r.Body)
+ if err != nil {
+ return
+ }
+ }
+
+ if bodyBytes == nil && r.bodyBuf == nil {
+ err = errors.New("unsupported 'Body' type/value")
+ }
+
+ // if any errors during body bytes handling, return it
+ if err != nil {
+ return
+ }
+
+ // []byte into Buffer
+ if bodyBytes != nil && r.bodyBuf == nil {
+ r.bodyBuf = acquireBuffer()
+ _, _ = r.bodyBuf.Write(bodyBytes)
+ }
+
+ return
+}
+
+func saveResponseIntoFile(c *Client, res *Response) error {
+ if res.Request.isSaveResponse {
+ file := ""
+
+ if len(c.outputDirectory) > 0 && !filepath.IsAbs(res.Request.outputFile) {
+ file += c.outputDirectory + string(filepath.Separator)
+ }
+
+ file = filepath.Clean(file + res.Request.outputFile)
+ if err := createDirectory(filepath.Dir(file)); err != nil {
+ return err
+ }
+
+ outFile, err := os.Create(file)
+ if err != nil {
+ return err
+ }
+ defer closeq(outFile)
+
+// io.Copy reads in chunks of at most 32 KB, so it also works well for large file downloads
+ defer closeq(res.RawResponse.Body)
+
+ written, err := io.Copy(outFile, res.RawResponse.Body)
+ if err != nil {
+ return err
+ }
+
+ res.size = written
+ }
+
+ return nil
+}
+
+func getBodyCopy(r *Request) (*bytes.Buffer, error) {
+ // If r.bodyBuf present, return the copy
+ if r.bodyBuf != nil {
+ bodyCopy := acquireBuffer()
+ if _, err := io.Copy(bodyCopy, bytes.NewReader(r.bodyBuf.Bytes())); err != nil {
+// cannot use io.Copy(bodyCopy, r.bodyBuf) because io.Copy would reset r.bodyBuf
+ return nil, err
+ }
+ return bodyCopy, nil
+ }
+
+// Maybe the body is an `io.Reader`.
+// Note: Resty users have to watch out for large body sizes with `io.Reader`
+ if r.RawRequest.Body != nil {
+ b, err := io.ReadAll(r.RawRequest.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Restore the Body
+ closeq(r.RawRequest.Body)
+ r.RawRequest.Body = io.NopCloser(bytes.NewBuffer(b))
+
+ // Return the Body bytes
+ return bytes.NewBuffer(b), nil
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/go-resty/resty/v2/redirect.go b/vendor/github.com/go-resty/resty/v2/redirect.go
new file mode 100644
index 0000000..ed58d73
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/redirect.go
@@ -0,0 +1,109 @@
+// Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "strings"
+)
+
+var (
+ // Since v2.8.0
+ ErrAutoRedirectDisabled = errors.New("auto redirect is disabled")
+)
+
+type (
+// RedirectPolicy to regulate the redirects in the resty client.
+// Objects implementing the RedirectPolicy interface can be registered
+// via `Client.SetRedirectPolicy`.
+//
+// The Apply function should return nil to continue the redirect journey, otherwise
+// return an error to stop the redirect.
+ RedirectPolicy interface {
+ Apply(req *http.Request, via []*http.Request) error
+ }
+
+ // The RedirectPolicyFunc type is an adapter to allow the use of ordinary functions as RedirectPolicy.
+ // If f is a function with the appropriate signature, RedirectPolicyFunc(f) is a RedirectPolicy object that calls f.
+ RedirectPolicyFunc func(*http.Request, []*http.Request) error
+)
+
+// Apply calls f(req, via).
+func (f RedirectPolicyFunc) Apply(req *http.Request, via []*http.Request) error {
+ return f(req, via)
+}
+
+// NoRedirectPolicy is used to disable redirects in the HTTP client
+//
+// resty.SetRedirectPolicy(NoRedirectPolicy())
+func NoRedirectPolicy() RedirectPolicy {
+ return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ return ErrAutoRedirectDisabled
+ })
+}
+
+// FlexibleRedirectPolicy is a convenient method to create a redirect policy that allows the given number of redirects for the HTTP client.
+//
+// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+func FlexibleRedirectPolicy(noOfRedirect int) RedirectPolicy {
+ return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ if len(via) >= noOfRedirect {
+ return fmt.Errorf("stopped after %d redirects", noOfRedirect)
+ }
+ checkHostAndAddHeaders(req, via[0])
+ return nil
+ })
+}
+
+// DomainCheckRedirectPolicy is a convenient method to define a domain name redirect rule in the resty client.
+// Redirects are allowed only for the hosts mentioned in the policy.
+//
+// resty.SetRedirectPolicy(DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+func DomainCheckRedirectPolicy(hostnames ...string) RedirectPolicy {
+ hosts := make(map[string]bool)
+ for _, h := range hostnames {
+ hosts[strings.ToLower(h)] = true
+ }
+
+ fn := RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ if ok := hosts[getHostname(req.URL.Host)]; !ok {
+ return errors.New("redirect is not allowed as per DomainCheckRedirectPolicy")
+ }
+
+ return nil
+ })
+
+ return fn
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Unexported methods
+//_______________________________________________________________________
+
+func getHostname(host string) (hostname string) {
+ if strings.Index(host, ":") > 0 {
+ host, _, _ = net.SplitHostPort(host)
+ }
+ hostname = strings.ToLower(host)
+ return
+}
+
+// By default Go does not forward request headers on redirect;
+// see the discussion in the thread
+// https://github.com/golang/go/issues/4800
+// Resty adds all the headers during a redirect for the same host
+func checkHostAndAddHeaders(cur *http.Request, pre *http.Request) {
+ curHostname := getHostname(cur.URL.Host)
+ preHostname := getHostname(pre.URL.Host)
+ if strings.EqualFold(curHostname, preHostname) {
+ for key, val := range pre.Header {
+ cur.Header[key] = val
+ }
+ } else { // only library User-Agent header is added
+ cur.Header.Set(hdrUserAgentKey, hdrUserAgentValue)
+ }
+}
diff --git a/vendor/github.com/go-resty/resty/v2/request.go b/vendor/github.com/go-resty/resty/v2/request.go
new file mode 100644
index 0000000..fec0976
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/request.go
@@ -0,0 +1,1093 @@
+// Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request struct and methods
+//_______________________________________________________________________
+
+// Request struct is used to compose and fire an individual request from
+// the resty client. Request provides options to override client-level
+// settings and also options for the request composition.
+type Request struct {
+ URL string
+ Method string
+ Token string
+ AuthScheme string
+ QueryParam url.Values
+ FormData url.Values
+ PathParams map[string]string
+ RawPathParams map[string]string
+ Header http.Header
+ Time time.Time
+ Body interface{}
+ Result interface{}
+ Error interface{}
+ RawRequest *http.Request
+ SRV *SRVRecord
+ UserInfo *User
+ Cookies []*http.Cookie
+ Debug bool
+
+ // Attempt is to represent the request attempt made during a Resty
+ // request execution flow, including retry count.
+ //
+ // Since v2.4.0
+ Attempt int
+
+ isMultiPart bool
+ isFormData bool
+ setContentLength bool
+ isSaveResponse bool
+ notParseResponse bool
+ jsonEscapeHTML bool
+ trace bool
+ outputFile string
+ fallbackContentType string
+ forceContentType string
+ ctx context.Context
+ values map[string]interface{}
+ client *Client
+ bodyBuf *bytes.Buffer
+ clientTrace *clientTrace
+ log Logger
+ multipartFiles []*File
+ multipartFields []*MultipartField
+ retryConditions []RetryConditionFunc
+}
+
+// Context method returns the Context if it is already set in the request,
+// otherwise it returns `context.Background()`.
+func (r *Request) Context() context.Context {
+ if r.ctx == nil {
+ return context.Background()
+ }
+ return r.ctx
+}
+
+// SetContext method sets the context.Context for the current Request. It allows
+// interrupting the request execution if the ctx.Done() channel is closed.
+// See https://blog.golang.org/context article and the "context" package
+// documentation.
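+//
+// A minimal sketch with a timeout context (the duration is illustrative):
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+// defer cancel()
+// resp, err := client.R().SetContext(ctx).Get("https://httpbin.org/get")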
+func (r *Request) SetContext(ctx context.Context) *Request {
+ r.ctx = ctx
+ return r
+}
+
+// SetHeader method is to set a single header field and its value in the current request.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`.
+//
+// client.R().
+// SetHeader("Content-Type", "application/json").
+// SetHeader("Accept", "application/json")
+//
+// Also you can override header value, which was set at client instance level.
+func (r *Request) SetHeader(header, value string) *Request {
+ r.Header.Set(header, value)
+ return r
+}
+
+// SetHeaders method sets multiple headers field and its values at one go in the current request.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.R().
+// SetHeaders(map[string]string{
+// "Content-Type": "application/json",
+// "Accept": "application/json",
+// })
+//
+// Also you can override header value, which was set at client instance level.
+func (r *Request) SetHeaders(headers map[string]string) *Request {
+ for h, v := range headers {
+ r.SetHeader(h, v)
+ }
+ return r
+}
+
+// SetHeaderMultiValues method sets multiple header fields and their values, as a list of strings, at one go in the current request.
+//
+// For Example: To set `Accept` as `text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8`
+//
+// client.R().
+// SetHeaderMultiValues(map[string][]string{
+// "Accept": []string{"text/html", "application/xhtml+xml", "application/xml;q=0.9", "image/webp", "*/*;q=0.8"},
+// })
+//
+// Also you can override header value, which was set at client instance level.
+func (r *Request) SetHeaderMultiValues(headers map[string][]string) *Request {
+ for key, values := range headers {
+ r.SetHeader(key, strings.Join(values, ", "))
+ }
+ return r
+}
+
+// SetHeaderVerbatim method is to set a single header field and its value verbatim in the current request.
+//
+// For Example: To set `all_lowercase` and `UPPERCASE` as `available`.
+//
+// client.R().
+// SetHeaderVerbatim("all_lowercase", "available").
+// SetHeaderVerbatim("UPPERCASE", "available")
+//
+// Also you can override header value, which was set at client instance level.
+//
+// Since v2.6.0
+func (r *Request) SetHeaderVerbatim(header, value string) *Request {
+ r.Header[header] = []string{value}
+ return r
+}
+
+// SetQueryParam method sets single parameter and its value in the current request.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large` in the URL after `?` mark.
+//
+// client.R().
+// SetQueryParam("search", "kitchen papers").
+// SetQueryParam("size", "large")
+//
+// Also you can override query params value, which was set at client instance level.
+func (r *Request) SetQueryParam(param, value string) *Request {
+ r.QueryParam.Set(param, value)
+ return r
+}
+
+// SetQueryParams method sets multiple parameters and its values at one go in the current request.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large` in the URL after `?` mark.
+//
+// client.R().
+// SetQueryParams(map[string]string{
+// "search": "kitchen papers",
+// "size": "large",
+// })
+//
+// Also you can override query params value, which was set at client instance level.
+func (r *Request) SetQueryParams(params map[string]string) *Request {
+ for p, v := range params {
+ r.SetQueryParam(p, v)
+ }
+ return r
+}
+
+// SetQueryParamsFromValues method appends multiple parameters with multi-value
+// (`url.Values`) at one go in the current request. It will be formed as
+// query string for the request.
+//
+// For Example: `status=pending&status=approved&status=open` in the URL after `?` mark.
+//
+// client.R().
+// SetQueryParamsFromValues(url.Values{
+// "status": []string{"pending", "approved", "open"},
+// })
+//
+// Also you can override query params value, which was set at client instance level.
+func (r *Request) SetQueryParamsFromValues(params url.Values) *Request {
+ for p, v := range params {
+ for _, pv := range v {
+ r.QueryParam.Add(p, pv)
+ }
+ }
+ return r
+}
+
+// SetQueryString method provides the ability to use a string as an input to set the URL query string for the request.
+//
+// Using String as an input
+//
+// client.R().
+// SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
+func (r *Request) SetQueryString(query string) *Request {
+ params, err := url.ParseQuery(strings.TrimSpace(query))
+ if err == nil {
+ for p, v := range params {
+ for _, pv := range v {
+ r.QueryParam.Add(p, pv)
+ }
+ }
+ } else {
+ r.log.Errorf("%v", err)
+ }
+ return r
+}
+
+// SetFormData method sets Form parameters and their values in the current request.
+// It's applicable only to the HTTP methods `POST` and `PUT`, and the request content type is set to
+// `application/x-www-form-urlencoded`.
+//
+// client.R().
+// SetFormData(map[string]string{
+// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+// "user_id": "3455454545",
+// })
+//
+// Also you can override form data value, which was set at client instance level.
+func (r *Request) SetFormData(data map[string]string) *Request {
+ for k, v := range data {
+ r.FormData.Set(k, v)
+ }
+ return r
+}
+
+// SetFormDataFromValues method appends multiple form parameters with multi-value
+// (`url.Values`) at one go in the current request.
+//
+// client.R().
+// SetFormDataFromValues(url.Values{
+// "search_criteria": []string{"book", "glass", "pencil"},
+// })
+//
+// Also you can override form data value, which was set at client instance level.
+func (r *Request) SetFormDataFromValues(data url.Values) *Request {
+ for k, v := range data {
+ for _, kv := range v {
+ r.FormData.Add(k, kv)
+ }
+ }
+ return r
+}
+
+// SetBody method sets the request body for the request. It supports a variety of practical needs.
+// Supported request body data types are `string`,
+// `[]byte`, `struct`, `map`, `slice` and `io.Reader`. The body value can be a pointer or non-pointer.
+// Automatic marshalling applies for JSON and XML content types, if the body is a `struct`, `map`, or `slice`.
+//
+// Note: `io.Reader` is processed as bufferless mode while sending request.
+//
+// For Example: Struct as a body input, based on content type, it will be marshalled.
+//
+// client.R().
+// SetBody(User{
+// Username: "jeeva@myjeeva.com",
+// Password: "welcome2resty",
+// })
+//
+// Map as a body input, based on content type, it will be marshalled.
+//
+// client.R().
+// SetBody(map[string]interface{}{
+// "username": "jeeva@myjeeva.com",
+// "password": "welcome2resty",
+// "address": &Address{
+// Address1: "1111 This is my street",
+// Address2: "Apt 201",
+// City: "My City",
+// State: "My State",
+// ZipCode: 00000,
+// },
+// })
+//
+// String as a body input. Suitable for any need as a string input.
+//
+// client.R().
+// SetBody(`{
+// "username": "jeeva@getrightcare.com",
+// "password": "admin"
+// }`)
+//
+// []byte as a body input. Suitable for raw request such as file upload, serialize & deserialize, etc.
+//
+// client.R().
+// SetBody([]byte("This is my raw request, sent as-is"))
+func (r *Request) SetBody(body interface{}) *Request {
+ r.Body = body
+ return r
+}
+
+// SetResult method is to register the response `Result` object for automatic unmarshalling for the request,
+// if the response status code is between 200 and 299 and the content type is either JSON or XML.
+//
+// Note: Result object can be pointer or non-pointer.
+//
+// client.R().SetResult(&AuthToken{})
+// // OR
+// client.R().SetResult(AuthToken{})
+//
+// Accessing a result value from response instance.
+//
+// response.Result().(*AuthToken)
+func (r *Request) SetResult(res interface{}) *Request {
+ if res != nil {
+ r.Result = getPointer(res)
+ }
+ return r
+}
+
+// SetError method is to register the request `Error` object for automatic unmarshalling for the request,
+// if the response status code is greater than 399 and the content type is either JSON or XML.
+//
+// Note: Error object can be pointer or non-pointer.
+//
+// client.R().SetError(&AuthError{})
+// // OR
+// client.R().SetError(AuthError{})
+//
+// Accessing an error value from the response instance.
+//
+// response.Error().(*AuthError)
+func (r *Request) SetError(err interface{}) *Request {
+ r.Error = getPointer(err)
+ return r
+}
+
+// SetFile method sets a single file field name and its path for multipart upload.
+//
+// client.R().
+// SetFile("my_file", "/Users/jeeva/Gas Bill - Sep.pdf")
+func (r *Request) SetFile(param, filePath string) *Request {
+ r.isMultiPart = true
+ r.FormData.Set("@"+param, filePath)
+ return r
+}
+
+// SetFiles method sets multiple file field names and their paths for multipart upload.
+//
+// client.R().
+// SetFiles(map[string]string{
+// "my_file1": "/Users/jeeva/Gas Bill - Sep.pdf",
+// "my_file2": "/Users/jeeva/Electricity Bill - Sep.pdf",
+// "my_file3": "/Users/jeeva/Water Bill - Sep.pdf",
+// })
+func (r *Request) SetFiles(files map[string]string) *Request {
+ r.isMultiPart = true
+ for f, fp := range files {
+ r.FormData.Set("@"+f, fp)
+ }
+ return r
+}
+
+// SetFileReader method is to set single file using io.Reader for multipart upload.
+//
+// client.R().
+// SetFileReader("profile_img", "my-profile-img.png", bytes.NewReader(profileImgBytes)).
+// SetFileReader("notes", "user-notes.txt", bytes.NewReader(notesBytes))
+func (r *Request) SetFileReader(param, fileName string, reader io.Reader) *Request {
+ r.isMultiPart = true
+ r.multipartFiles = append(r.multipartFiles, &File{
+ Name: fileName,
+ ParamName: param,
+ Reader: reader,
+ })
+ return r
+}
+
+// SetMultipartFormData method allows simple form data to be attached to the request as `multipart/form-data`.
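+//
+// For example (field names and values are illustrative):
+//
+// client.R().SetMultipartFormData(map[string]string{
+// "first_name": "Jeeva",
+// "last_name": "M",
+// })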
+func (r *Request) SetMultipartFormData(data map[string]string) *Request {
+ for k, v := range data {
+ r = r.SetMultipartField(k, "", "", strings.NewReader(v))
+ }
+
+ return r
+}
+
+// SetMultipartField method is to set custom data using io.Reader for multipart upload.
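+//
+// For example (parameter values are illustrative):
+//
+// client.R().SetMultipartField("uploadManifest", "upload-file.json", "application/json",
+// strings.NewReader(`{"input": "data"}`))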
+func (r *Request) SetMultipartField(param, fileName, contentType string, reader io.Reader) *Request {
+ r.isMultiPart = true
+ r.multipartFields = append(r.multipartFields, &MultipartField{
+ Param: param,
+ FileName: fileName,
+ ContentType: contentType,
+ Reader: reader,
+ })
+ return r
+}
+
+// SetMultipartFields method is to set multiple data fields using io.Reader for multipart upload.
+//
+// For Example:
+//
+// client.R().SetMultipartFields(
+// &resty.MultipartField{
+// Param: "uploadManifest1",
+// FileName: "upload-file-1.json",
+// ContentType: "application/json",
+// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 1", "_filename" : ["file1.txt"]}}`),
+// },
+// &resty.MultipartField{
+// Param: "uploadManifest2",
+// FileName: "upload-file-2.json",
+// ContentType: "application/json",
+// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 2", "_filename" : ["file2.txt"]}}`),
+// })
+//
+// If you have slice already, then simply call-
+//
+// client.R().SetMultipartFields(fields...)
+func (r *Request) SetMultipartFields(fields ...*MultipartField) *Request {
+ r.isMultiPart = true
+ r.multipartFields = append(r.multipartFields, fields...)
+ return r
+}
+
+// SetContentLength method sets the HTTP header `Content-Length` value for current request.
+// By default Resty won't set `Content-Length`. Also you have an option to enable it for every
+// request.
+//
+// See `Client.SetContentLength`
+//
+// client.R().SetContentLength(true)
+func (r *Request) SetContentLength(l bool) *Request {
+ r.setContentLength = l
+ return r
+}
+
+// SetBasicAuth method sets the basic authentication header in the current HTTP request.
+//
+// For Example:
+//
+// Authorization: Basic
+//
+// To set the header for username "go-resty" and password "welcome"
+//
+// client.R().SetBasicAuth("go-resty", "welcome")
+//
+// This method overrides the credentials set by method `Client.SetBasicAuth`.
+func (r *Request) SetBasicAuth(username, password string) *Request {
+ r.UserInfo = &User{Username: username, Password: password}
+ return r
+}
+
+// SetAuthToken method sets the auth token header (default scheme: Bearer) in the current HTTP request. Header example:
+//
+// Authorization: Bearer
+//
+// For Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
+//
+// client.R().SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+//
+// This method overrides the Auth token set by method `Client.SetAuthToken`.
+func (r *Request) SetAuthToken(token string) *Request {
+ r.Token = token
+ return r
+}
+
+// SetAuthScheme method sets the auth token scheme type in the HTTP request. For Example:
+//
+// Authorization:
+//
+// For Example: To set the scheme to use OAuth
+//
+// client.R().SetAuthScheme("OAuth")
+//
+// This auth header scheme is applied to the current request.
+// It overrides the auth scheme set at the client instance level, if any.
+//
+// Information about Auth schemes can be found in RFC7235 which is linked to below along with the page containing
+// the currently defined official authentication schemes:
+//
+// https://tools.ietf.org/html/rfc7235
+// https://www.iana.org/assignments/http-authschemes/http-authschemes.xhtml#authschemes
+//
+// This method overrides the Authorization scheme set by method `Client.SetAuthScheme`.
+func (r *Request) SetAuthScheme(scheme string) *Request {
+ r.AuthScheme = scheme
+ return r
+}
+
+// SetDigestAuth method sets the Digest Access auth scheme for the HTTP request. If a server responds with 401 and sends
+// a Digest challenge in the WWW-Authenticate Header, the request will be resent with the appropriate Authorization Header.
+//
+// For Example: To set the Digest scheme with username "Mufasa" and password "Circle Of Life"
+//
+// client.R().SetDigestAuth("Mufasa", "Circle Of Life")
+//
+// Information about Digest Access Authentication can be found in RFC7616:
+//
+// https://datatracker.ietf.org/doc/html/rfc7616
+//
+// This method overrides the username and password set by method `Client.SetDigestAuth`.
+func (r *Request) SetDigestAuth(username, password string) *Request {
+ oldTransport := r.client.httpClient.Transport
+ r.client.OnBeforeRequest(func(c *Client, _ *Request) error {
+ c.httpClient.Transport = &digestTransport{
+ digestCredentials: digestCredentials{username, password},
+ transport: oldTransport,
+ }
+ return nil
+ })
+ r.client.OnAfterResponse(func(c *Client, _ *Response) error {
+ c.httpClient.Transport = oldTransport
+ return nil
+ })
+
+ return r
+}
+
+// SetOutput method sets the output file for current HTTP request. Current HTTP response will be
+// saved into given file. It is similar to `curl -o` flag. Absolute path or relative path can be used.
+// If it is a relative path then the output file goes under the output directory, as mentioned
+// in the `Client.SetOutputDirectory`.
+//
+// client.R().
+// SetOutput("/Users/jeeva/Downloads/ReplyWithHeader-v5.1-beta.zip").
+// Get("http://bit.ly/1LouEKr")
+//
+// Note: In this scenario `Response.Body` might be nil.
+func (r *Request) SetOutput(file string) *Request {
+ r.outputFile = file
+ r.isSaveResponse = true
+ return r
+}
+
+// SetSRV method sets the details to query the service SRV record and execute the
+// request.
+//
+// client.R().
+// SetSRV(SRVRecord{"web", "testservice.com"}).
+// Get("/get")
+func (r *Request) SetSRV(srv *SRVRecord) *Request {
+ r.SRV = srv
+ return r
+}
+
+// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically.
+// Resty exposes the raw response body as an `io.ReadCloser`. Do not forget to close the body,
+// otherwise you might run into connection leaks and connections will not be reused.
+//
+// Note: Response middlewares are not applicable if you use this option, since you have
+// taken over control of response parsing from `Resty`.
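+//
+// A minimal usage sketch (the URL is illustrative):
+//
+// resp, err := client.R().
+// SetDoNotParseResponse(true).
+// Get("https://httpbin.org/get")
+// if err == nil {
+// defer resp.RawBody().Close()
+// }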
+func (r *Request) SetDoNotParseResponse(parse bool) *Request {
+ r.notParseResponse = parse
+ return r
+}
+
+// SetPathParam method sets single URL path key-value pair in the
+// Resty current request instance.
+//
+// client.R().SetPathParam("userId", "sample@sample.com")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/sample@sample.com/details
+//
+// client.R().SetPathParam("path", "groups/developers")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/groups%2Fdevelopers/details
+//
+// It replaces the value of the key while composing the request URL.
+// The values will be escaped using `url.PathEscape` function.
+//
+// Also you can override Path Params value, which was set at client instance
+// level.
+func (r *Request) SetPathParam(param, value string) *Request {
+ r.PathParams[param] = value
+ return r
+}
+
+// SetPathParams method sets multiple URL path key-value pairs at one go in the
+// Resty current request instance.
+//
+// client.R().SetPathParams(map[string]string{
+// "userId": "sample@sample.com",
+// "subAccountId": "100002",
+// "path": "groups/developers",
+// })
+//
+// Result:
+// URL - /v1/users/{userId}/{subAccountId}/{path}/details
+// Composed URL - /v1/users/sample@sample.com/100002/groups%2Fdevelopers/details
+//
+// It replaces the value of the key while composing the request URL.
+// The values will be escaped using the `url.PathEscape` function.
+//
+// Also you can override Path Params value, which was set at client instance
+// level.
+func (r *Request) SetPathParams(params map[string]string) *Request {
+ for p, v := range params {
+ r.SetPathParam(p, v)
+ }
+ return r
+}
+
+// SetRawPathParam method sets single URL path key-value pair in the
+// Resty current request instance.
+//
+// client.R().SetPathParam("userId", "sample@sample.com")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/sample@sample.com/details
+//
+// client.R().SetPathParam("path", "groups/developers")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/groups/developers/details
+//
+// It replaces the value of the key while composing the request URL.
+// The value will be used as it is and will not be escaped.
+//
+// Also you can override Path Params value, which was set at client instance
+// level.
+//
+// Since v2.8.0
+func (r *Request) SetRawPathParam(param, value string) *Request {
+ r.RawPathParams[param] = value
+ return r
+}
+
+// SetRawPathParams method sets multiple URL path key-value pairs at one go in the
+// Resty current request instance.
+//
+// client.R().SetRawPathParams(map[string]string{
+// "userId": "sample@sample.com",
+// "subAccountId": "100002",
+// "path": "groups/developers",
+// })
+//
+// Result:
+// URL - /v1/users/{userId}/{subAccountId}/{path}/details
+// Composed URL - /v1/users/sample@sample.com/100002/groups/developers/details
+//
+// It replaces the value of the key while composing request URL.
+// The values will be used as they are and will not be escaped.
+//
+// Also you can override Path Params value, which was set at client instance
+// level.
+//
+// Since v2.8.0
+func (r *Request) SetRawPathParams(params map[string]string) *Request {
+ for p, v := range params {
+ r.SetRawPathParam(p, v)
+ }
+ return r
+}
+
+// ExpectContentType method allows providing a fallback `Content-Type` for automatic unmarshalling
+// when the `Content-Type` response header is unavailable.
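+//
+// For example:
+//
+// client.R().ExpectContentType("application/json")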
+func (r *Request) ExpectContentType(contentType string) *Request {
+ r.fallbackContentType = contentType
+ return r
+}
+
+// ForceContentType method provides a strong sense of response `Content-Type` for automatic unmarshalling.
+// Resty gives this a higher priority than the `Content-Type` response header. This means that if both
+// `Request.ForceContentType` is set and the response `Content-Type` is available, `ForceContentType` will win.
+func (r *Request) ForceContentType(contentType string) *Request {
+ r.forceContentType = contentType
+ return r
+}
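+
+// Example (an illustrative sketch; the endpoint is a placeholder): ExpectContentType
+// only applies when the server omits the Content-Type header, whereas ForceContentType
+// overrides whatever the server sends.
+//
+//	client := resty.New()
+//	resp, err := client.R().
+//		SetResult(&map[string]interface{}{}).
+//		ExpectContentType("application/json"). // fallback when the header is missing
+//		ForceContentType("application/json").  // wins even if the header says otherwise
+//		Get("https://api.example.com/v1/ping")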
+
+// SetJSONEscapeHTML method enables or disables HTML escaping on JSON marshal.
+//
+// Note: This option is only applicable to the standard JSON marshaller.
+func (r *Request) SetJSONEscapeHTML(b bool) *Request {
+ r.jsonEscapeHTML = b
+ return r
+}
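+
+// Example (an illustrative sketch): with HTML escaping enabled (the default),
+// characters such as '<', '>' and '&' in the JSON body are encoded as \u003c,
+// \u003e and \u0026; disabling it keeps them literal.
+//
+//	client.R().
+//		SetJSONEscapeHTML(false).
+//		SetBody(map[string]string{"html": "<b>hi</b>"}) // marshalled as {"html":"<b>hi</b>"}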
+
+// SetCookie method appends a single cookie in the current request instance.
+//
+// client.R().SetCookie(&http.Cookie{
+// Name:"go-resty",
+// Value:"This is cookie value",
+// })
+//
+// Note: The method appends the cookie to any cookies already set in the request.
+//
+// Since v2.1.0
+func (r *Request) SetCookie(hc *http.Cookie) *Request {
+ r.Cookies = append(r.Cookies, hc)
+ return r
+}
+
+// SetCookies method sets an array of cookies in the current request instance.
+//
+// cookies := []*http.Cookie{
+// &http.Cookie{
+// Name:"go-resty-1",
+// Value:"This is cookie 1 value",
+// },
+// &http.Cookie{
+// Name:"go-resty-2",
+// Value:"This is cookie 2 value",
+// },
+// }
+//
+//	// Setting cookies in resty's current request
+// client.R().SetCookies(cookies)
+//
+// Note: The method appends the cookies to any cookies already set in the request.
+//
+// Since v2.1.0
+func (r *Request) SetCookies(rs []*http.Cookie) *Request {
+ r.Cookies = append(r.Cookies, rs...)
+ return r
+}
+
+// SetLogger method sets the given logger for logging Resty request and response details.
+// By default, requests and responses inherit their logger from the client.
+//
+// The logger must comply with the `resty.Logger` interface.
+func (r *Request) SetLogger(l Logger) *Request {
+ r.log = l
+ return r
+}
+
+// SetDebug method enables the debug mode on the current Resty request. It logs
+// the details of the current request and response.
+// For `Request` it logs information such as HTTP verb, Relative URL path, Host, Headers, Body if it has one.
+// For `Response` it logs information such as Status, Response Time, Headers, Body if it has one.
+//
+// client.R().SetDebug(true)
+func (r *Request) SetDebug(d bool) *Request {
+ r.Debug = d
+ return r
+}
+
+// AddRetryCondition method adds a retry condition function to the request's
+// array of functions that are checked to determine if the request should be retried.
+// The request will be retried if any of these functions returns true and the error is nil.
+//
+// Note: These retry conditions are checked before all retry conditions of the client.
+//
+// Since v2.7.0
+func (r *Request) AddRetryCondition(condition RetryConditionFunc) *Request {
+ r.retryConditions = append(r.retryConditions, condition)
+ return r
+}
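+
+// Example (an illustrative sketch; the retry count and URL are placeholders):
+// a per-request condition that retries on HTTP 429 responses.
+//
+//	client := resty.New().SetRetryCount(2)
+//	resp, err := client.R().
+//		AddRetryCondition(func(r *resty.Response, err error) bool {
+//			return err == nil && r.StatusCode() == http.StatusTooManyRequests
+//		}).
+//		Get("https://api.example.com/v1/quota")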
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// HTTP request tracing
+//_______________________________________________________________________
+
+// EnableTrace method enables trace for the current request
+// using `httptrace.ClientTrace` and provides insights.
+//
+// client := resty.New()
+//
+// resp, err := client.R().EnableTrace().Get("https://httpbin.org/get")
+// fmt.Println("Error:", err)
+// fmt.Println("Trace Info:", resp.Request.TraceInfo())
+//
+// See also `Client.EnableTrace` to get trace info for all requests.
+//
+// Since v2.0.0
+func (r *Request) EnableTrace() *Request {
+ r.trace = true
+ return r
+}
+
+// TraceInfo method returns the trace info for the request.
+// If neither the Client nor the Request EnableTrace function has been called
+// prior to the request being made, an empty TraceInfo object will be returned.
+//
+// Since v2.0.0
+func (r *Request) TraceInfo() TraceInfo {
+ ct := r.clientTrace
+
+ if ct == nil {
+ return TraceInfo{}
+ }
+
+ ti := TraceInfo{
+ DNSLookup: ct.dnsDone.Sub(ct.dnsStart),
+ TLSHandshake: ct.tlsHandshakeDone.Sub(ct.tlsHandshakeStart),
+ ServerTime: ct.gotFirstResponseByte.Sub(ct.gotConn),
+ IsConnReused: ct.gotConnInfo.Reused,
+ IsConnWasIdle: ct.gotConnInfo.WasIdle,
+ ConnIdleTime: ct.gotConnInfo.IdleTime,
+ RequestAttempt: r.Attempt,
+ }
+
+ // Calculate the total time accordingly,
+ // when connection is reused
+ if ct.gotConnInfo.Reused {
+ ti.TotalTime = ct.endTime.Sub(ct.getConn)
+ } else {
+ ti.TotalTime = ct.endTime.Sub(ct.dnsStart)
+ }
+
+ // Only calculate on successful connections
+ if !ct.connectDone.IsZero() {
+ ti.TCPConnTime = ct.connectDone.Sub(ct.dnsDone)
+ }
+
+ // Only calculate on successful connections
+ if !ct.gotConn.IsZero() {
+ ti.ConnTime = ct.gotConn.Sub(ct.getConn)
+ }
+
+ // Only calculate on successful connections
+ if !ct.gotFirstResponseByte.IsZero() {
+ ti.ResponseTime = ct.endTime.Sub(ct.gotFirstResponseByte)
+ }
+
+ // Capture remote address info when connection is non-nil
+ if ct.gotConnInfo.Conn != nil {
+ ti.RemoteAddr = ct.gotConnInfo.Conn.RemoteAddr()
+ }
+
+ return ti
+}
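+
+// Example (an illustrative sketch; the URL is a placeholder): reading individual
+// timings after a traced request.
+//
+//	resp, err := resty.New().R().EnableTrace().Get("https://httpbin.org/get")
+//	if err == nil {
+//		ti := resp.Request.TraceInfo()
+//		fmt.Println("DNSLookup :", ti.DNSLookup)
+//		fmt.Println("ConnTime  :", ti.ConnTime)
+//		fmt.Println("TotalTime :", ti.TotalTime)
+//	}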
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// HTTP verb method starts here
+//_______________________________________________________________________
+
+// Get method does GET HTTP request. It's defined in section 4.3.1 of RFC7231.
+func (r *Request) Get(url string) (*Response, error) {
+ return r.Execute(MethodGet, url)
+}
+
+// Head method does HEAD HTTP request. It's defined in section 4.3.2 of RFC7231.
+func (r *Request) Head(url string) (*Response, error) {
+ return r.Execute(MethodHead, url)
+}
+
+// Post method does POST HTTP request. It's defined in section 4.3.3 of RFC7231.
+func (r *Request) Post(url string) (*Response, error) {
+ return r.Execute(MethodPost, url)
+}
+
+// Put method does PUT HTTP request. It's defined in section 4.3.4 of RFC7231.
+func (r *Request) Put(url string) (*Response, error) {
+ return r.Execute(MethodPut, url)
+}
+
+// Delete method does DELETE HTTP request. It's defined in section 4.3.5 of RFC7231.
+func (r *Request) Delete(url string) (*Response, error) {
+ return r.Execute(MethodDelete, url)
+}
+
+// Options method does OPTIONS HTTP request. It's defined in section 4.3.7 of RFC7231.
+func (r *Request) Options(url string) (*Response, error) {
+ return r.Execute(MethodOptions, url)
+}
+
+// Patch method does PATCH HTTP request. It's defined in section 2 of RFC5789.
+func (r *Request) Patch(url string) (*Response, error) {
+ return r.Execute(MethodPatch, url)
+}
+
+// Send method performs the HTTP request using the method and URL already defined
+// for current `Request`.
+//
+// req := client.R()
+// req.Method = resty.GET
+// req.URL = "http://httpbin.org/get"
+// resp, err := req.Send()
+func (r *Request) Send() (*Response, error) {
+ return r.Execute(r.Method, r.URL)
+}
+
+// Execute method performs the HTTP request with given HTTP method and URL
+// for current `Request`.
+//
+// resp, err := client.R().Execute(resty.GET, "http://httpbin.org/get")
+func (r *Request) Execute(method, url string) (*Response, error) {
+ var addrs []*net.SRV
+ var resp *Response
+ var err error
+
+ defer func() {
+ if rec := recover(); rec != nil {
+ if err, ok := rec.(error); ok {
+ r.client.onPanicHooks(r, err)
+ } else {
+ r.client.onPanicHooks(r, fmt.Errorf("panic %v", rec))
+ }
+ panic(rec)
+ }
+ }()
+
+ if r.isMultiPart && !(method == MethodPost || method == MethodPut || method == MethodPatch) {
+ // No OnError hook here since this is a request validation error
+ err := fmt.Errorf("multipart content is not allowed in HTTP verb [%v]", method)
+ r.client.onInvalidHooks(r, err)
+ return nil, err
+ }
+
+ if r.SRV != nil {
+ _, addrs, err = net.LookupSRV(r.SRV.Service, "tcp", r.SRV.Domain)
+ if err != nil {
+ r.client.onErrorHooks(r, nil, err)
+ return nil, err
+ }
+ }
+
+ r.Method = method
+ r.URL = r.selectAddr(addrs, url, 0)
+
+ if r.client.RetryCount == 0 {
+ r.Attempt = 1
+ resp, err = r.client.execute(r)
+ r.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))
+ return resp, unwrapNoRetryErr(err)
+ }
+
+ err = Backoff(
+ func() (*Response, error) {
+ r.Attempt++
+
+ r.URL = r.selectAddr(addrs, url, r.Attempt)
+
+ resp, err = r.client.execute(r)
+ if err != nil {
+ r.log.Warnf("%v, Attempt %v", err, r.Attempt)
+ }
+
+ return resp, err
+ },
+ Retries(r.client.RetryCount),
+ WaitTime(r.client.RetryWaitTime),
+ MaxWaitTime(r.client.RetryMaxWaitTime),
+ RetryConditions(append(r.retryConditions, r.client.RetryConditions...)),
+ RetryHooks(r.client.RetryHooks),
+ ResetMultipartReaders(r.client.RetryResetReaders),
+ )
+
+ if err != nil {
+ r.log.Errorf("%v", err)
+ }
+
+ r.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))
+ return resp, unwrapNoRetryErr(err)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// SRVRecord struct
+//_______________________________________________________________________
+
+// SRVRecord struct holds the data to query the SRV record for the
+// given service.
+type SRVRecord struct {
+ Service string
+ Domain string
+}
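+
+// Example (an illustrative sketch; the service and domain are placeholders, and
+// the record is assumed to be attached via the request's exported SRV field):
+// when SRV is set, Execute looks up the record and rotates over the returned
+// targets, composing the URL from scheme, target host, port and the given path.
+//
+//	req := client.R()
+//	req.SRV = &resty.SRVRecord{Service: "web", Domain: "example.com"}
+//	resp, err := req.Get("/v1/health")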
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request Unexported methods
+//_______________________________________________________________________
+
+func (r *Request) fmtBodyString(sl int64) (body string) {
+ body = "***** NO CONTENT *****"
+ if !isPayloadSupported(r.Method, r.client.AllowGetMethodPayload) {
+ return
+ }
+
+ if _, ok := r.Body.(io.Reader); ok {
+ body = "***** BODY IS io.Reader *****"
+ return
+ }
+
+ // multipart or form-data
+ if r.isMultiPart || r.isFormData {
+ bodySize := int64(r.bodyBuf.Len())
+ if bodySize > sl {
+ body = fmt.Sprintf("***** REQUEST TOO LARGE (size - %d) *****", bodySize)
+ return
+ }
+ body = r.bodyBuf.String()
+ return
+ }
+
+ // request body data
+ if r.Body == nil {
+ return
+ }
+ var prtBodyBytes []byte
+ var err error
+
+ contentType := r.Header.Get(hdrContentTypeKey)
+ kind := kindOf(r.Body)
+ if canJSONMarshal(contentType, kind) {
+ prtBodyBytes, err = noescapeJSONMarshalIndent(&r.Body)
+ } else if IsXMLType(contentType) && (kind == reflect.Struct) {
+ prtBodyBytes, err = xml.MarshalIndent(&r.Body, "", " ")
+ } else if b, ok := r.Body.(string); ok {
+ if IsJSONType(contentType) {
+ bodyBytes := []byte(b)
+ out := acquireBuffer()
+ defer releaseBuffer(out)
+ if err = json.Indent(out, bodyBytes, "", " "); err == nil {
+ prtBodyBytes = out.Bytes()
+ }
+ } else {
+ body = b
+ }
+ } else if b, ok := r.Body.([]byte); ok {
+ body = fmt.Sprintf("***** BODY IS byte(s) (size - %d) *****", len(b))
+ return
+ }
+
+ if prtBodyBytes != nil && err == nil {
+ body = string(prtBodyBytes)
+ }
+
+ if len(body) > 0 {
+ bodySize := int64(len([]byte(body)))
+ if bodySize > sl {
+ body = fmt.Sprintf("***** REQUEST TOO LARGE (size - %d) *****", bodySize)
+ }
+ }
+
+ return
+}
+
+func (r *Request) selectAddr(addrs []*net.SRV, path string, attempt int) string {
+ if addrs == nil {
+ return path
+ }
+
+ idx := attempt % len(addrs)
+ domain := strings.TrimRight(addrs[idx].Target, ".")
+ path = strings.TrimLeft(path, "/")
+
+ return fmt.Sprintf("%s://%s:%d/%s", r.client.scheme, domain, addrs[idx].Port, path)
+}
+
+func (r *Request) initValuesMap() {
+ if r.values == nil {
+ r.values = make(map[string]interface{})
+ }
+}
+
+var noescapeJSONMarshal = func(v interface{}) (*bytes.Buffer, error) {
+ buf := acquireBuffer()
+ encoder := json.NewEncoder(buf)
+ encoder.SetEscapeHTML(false)
+ if err := encoder.Encode(v); err != nil {
+ releaseBuffer(buf)
+ return nil, err
+ }
+
+ return buf, nil
+}
+
+var noescapeJSONMarshalIndent = func(v interface{}) ([]byte, error) {
+ buf := acquireBuffer()
+ defer releaseBuffer(buf)
+
+ encoder := json.NewEncoder(buf)
+ encoder.SetEscapeHTML(false)
+ encoder.SetIndent("", " ")
+
+ if err := encoder.Encode(v); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/go-resty/resty/v2/response.go b/vendor/github.com/go-resty/resty/v2/response.go
new file mode 100644
index 0000000..bda7787
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/response.go
@@ -0,0 +1,177 @@
+// Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response struct and methods
+//_______________________________________________________________________
+
+// Response struct holds response values of executed request.
+type Response struct {
+ Request *Request
+ RawResponse *http.Response
+
+ body []byte
+ size int64
+ receivedAt time.Time
+}
+
+// Body method returns the HTTP response body as a []byte for the executed request.
+//
+// Note: `Response.Body` might be nil if `Request.SetOutput` is used.
+func (r *Response) Body() []byte {
+ if r.RawResponse == nil {
+ return []byte{}
+ }
+ return r.body
+}
+
+// Status method returns the HTTP status string for the executed request.
+//
+// Example: 200 OK
+func (r *Response) Status() string {
+ if r.RawResponse == nil {
+ return ""
+ }
+ return r.RawResponse.Status
+}
+
+// StatusCode method returns the HTTP status code for the executed request.
+//
+// Example: 200
+func (r *Response) StatusCode() int {
+ if r.RawResponse == nil {
+ return 0
+ }
+ return r.RawResponse.StatusCode
+}
+
+// Proto method returns the HTTP response protocol used for the request.
+func (r *Response) Proto() string {
+ if r.RawResponse == nil {
+ return ""
+ }
+ return r.RawResponse.Proto
+}
+
+// Result method returns the response value as an object, if it has one.
+func (r *Response) Result() interface{} {
+ return r.Request.Result
+}
+
+// Error method returns the error object, if it has one.
+func (r *Response) Error() interface{} {
+ return r.Request.Error
+}
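+
+// Example (an illustrative sketch; User and APIError are application-defined
+// placeholder types, and the endpoint is a placeholder): Result and Error return
+// whatever was registered via SetResult / SetError, populated by the automatic
+// unmarshalling, so they need a type assertion back to the registered type.
+//
+//	resp, err := client.R().
+//		SetResult(&User{}).    // User is a placeholder type
+//		SetError(&APIError{}). // APIError is a placeholder type
+//		Get("https://api.example.com/v1/users/1")
+//	if err == nil && resp.IsSuccess() {
+//		user := resp.Result().(*User)
+//		_ = user
+//	}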
+
+// Header method returns the response headers.
+func (r *Response) Header() http.Header {
+ if r.RawResponse == nil {
+ return http.Header{}
+ }
+ return r.RawResponse.Header
+}
+
+// Cookies method returns all the response cookies.
+func (r *Response) Cookies() []*http.Cookie {
+ if r.RawResponse == nil {
+ return make([]*http.Cookie, 0)
+ }
+ return r.RawResponse.Cookies()
+}
+
+// String method returns the body of the server response as a string.
+func (r *Response) String() string {
+ if len(r.body) == 0 {
+ return ""
+ }
+ return strings.TrimSpace(string(r.body))
+}
+
+// Time method returns the duration between the request being sent and the response being received.
+//
+// See `Response.ReceivedAt` to know when the client received the response and `Response.Request.Time`
+// to know when the client sent the request.
+func (r *Response) Time() time.Duration {
+ if r.Request.clientTrace != nil {
+ return r.Request.TraceInfo().TotalTime
+ }
+ return r.receivedAt.Sub(r.Request.Time)
+}
+
+// ReceivedAt method returns the time when the response was received from the server for the request.
+func (r *Response) ReceivedAt() time.Time {
+ return r.receivedAt
+}
+
+// Size method returns the HTTP response size in bytes. You could rely on the HTTP `Content-Length`
+// header, but it is not reliable for chunked transfer or compressed responses. Resty calculates the
+// response size at the client end, so you get the actual size of the HTTP response.
+func (r *Response) Size() int64 {
+ return r.size
+}
+
+// RawBody method exposes the raw HTTP response body. Use this method in conjunction with the
+// `SetDoNotParseResponse` option, otherwise you get an error like `read err: http: read on closed response body`.
+//
+// Do not forget to close the body, otherwise you risk connection leaks and no connection reuse.
+// You have taken over control of response parsing from Resty.
+func (r *Response) RawBody() io.ReadCloser {
+ if r.RawResponse == nil {
+ return nil
+ }
+ return r.RawResponse.Body
+}
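+
+// Example (an illustrative sketch; the URL is a placeholder): reading the raw body
+// when response parsing is turned off.
+//
+//	resp, err := client.R().
+//		SetDoNotParseResponse(true).
+//		Get("https://api.example.com/v1/export")
+//	if err == nil {
+//		defer resp.RawBody().Close()
+//		data, _ := io.ReadAll(resp.RawBody())
+//		_ = data
+//	}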
+
+// IsSuccess method returns true if HTTP status `code >= 200 and <= 299` otherwise false.
+func (r *Response) IsSuccess() bool {
+ return r.StatusCode() > 199 && r.StatusCode() < 300
+}
+
+// IsError method returns true if HTTP status `code >= 400` otherwise false.
+func (r *Response) IsError() bool {
+ return r.StatusCode() > 399
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response Unexported methods
+//_______________________________________________________________________
+
+func (r *Response) setReceivedAt() {
+ r.receivedAt = time.Now()
+ if r.Request.clientTrace != nil {
+ r.Request.clientTrace.endTime = r.receivedAt
+ }
+}
+
+func (r *Response) fmtBodyString(sl int64) string {
+ if len(r.body) > 0 {
+ if int64(len(r.body)) > sl {
+ return fmt.Sprintf("***** RESPONSE TOO LARGE (size - %d) *****", len(r.body))
+ }
+ ct := r.Header().Get(hdrContentTypeKey)
+ if IsJSONType(ct) {
+ out := acquireBuffer()
+ defer releaseBuffer(out)
+ err := json.Indent(out, r.body, "", " ")
+ if err != nil {
+ return fmt.Sprintf("*** Error: Unable to format response body - \"%s\" ***\n\nLog Body as-is:\n%s", err, r.String())
+ }
+ return out.String()
+ }
+ return r.String()
+ }
+
+ return "***** NO CONTENT *****"
+}
diff --git a/vendor/github.com/go-resty/resty/v2/resty.go b/vendor/github.com/go-resty/resty/v2/resty.go
new file mode 100644
index 0000000..0e88068
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/resty.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+// Package resty provides a simple HTTP and REST client library for Go.
+package resty
+
+import (
+ "net"
+ "net/http"
+ "net/http/cookiejar"
+
+ "golang.org/x/net/publicsuffix"
+)
+
+// Version # of resty
+const Version = "2.9.1"
+
+// New method creates a new Resty client.
+func New() *Client {
+ cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ return createClient(&http.Client{
+ Jar: cookieJar,
+ })
+}
+
+// NewWithClient method creates a new Resty client with the given `http.Client`.
+func NewWithClient(hc *http.Client) *Client {
+ return createClient(hc)
+}
+
+// NewWithLocalAddr method creates a new Resty client with the given local address
+// to dial from.
+func NewWithLocalAddr(localAddr net.Addr) *Client {
+ cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ return createClient(&http.Client{
+ Jar: cookieJar,
+ Transport: createTransport(localAddr),
+ })
+}
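+
+// Example (an illustrative sketch): the three constructors cover the common cases;
+// NewWithClient is handy when an application already tunes its own http.Client.
+//
+//	hc := &http.Client{Timeout: 30 * time.Second}
+//	client := resty.NewWithClient(hc)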
diff --git a/vendor/github.com/go-resty/resty/v2/retry.go b/vendor/github.com/go-resty/resty/v2/retry.go
new file mode 100644
index 0000000..c5eda26
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/retry.go
@@ -0,0 +1,252 @@
+// Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "context"
+ "io"
+ "math"
+ "math/rand"
+ "sync"
+ "time"
+)
+
+const (
+ defaultMaxRetries = 3
+ defaultWaitTime = time.Duration(100) * time.Millisecond
+ defaultMaxWaitTime = time.Duration(2000) * time.Millisecond
+)
+
+type (
+ // Option is to create convenient retry options like wait time, max retries, etc.
+ Option func(*Options)
+
+ // RetryConditionFunc type is for retry condition function
+ // input: non-nil Response OR request execution error
+ RetryConditionFunc func(*Response, error) bool
+
+ // OnRetryFunc is for side-effecting functions triggered on retry
+ OnRetryFunc func(*Response, error)
+
+	// RetryAfterFunc returns the time to wait before a retry.
+	// For example, it can parse the HTTP Retry-After header:
+	// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+	// A non-nil error is returned if the request is found to be not retryable.
+	// (0, nil) is a special result that means 'use the default algorithm'.
+	// See the example sketch after this type block.
+ RetryAfterFunc func(*Client, *Response) (time.Duration, error)
+
+ // Options struct is used to hold retry settings.
+ Options struct {
+ maxRetries int
+ waitTime time.Duration
+ maxWaitTime time.Duration
+ retryConditions []RetryConditionFunc
+ retryHooks []OnRetryFunc
+ resetReaders bool
+ }
+)
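+
+// Example (an illustrative sketch; assumes the client exposes SetRetryAfter for
+// registering the callback): a RetryAfterFunc that honours the Retry-After
+// response header when it carries a number of seconds.
+//
+//	client.SetRetryAfter(func(c *resty.Client, resp *resty.Response) (time.Duration, error) {
+//		if s := resp.Header().Get("Retry-After"); s != "" {
+//			if secs, err := strconv.Atoi(s); err == nil {
+//				return time.Duration(secs) * time.Second, nil
+//			}
+//		}
+//		return 0, nil // fall back to the default backoff algorithm
+//	})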
+
+// Retries sets the max number of retries
+func Retries(value int) Option {
+ return func(o *Options) {
+ o.maxRetries = value
+ }
+}
+
+// WaitTime sets the default wait time to sleep between requests
+func WaitTime(value time.Duration) Option {
+ return func(o *Options) {
+ o.waitTime = value
+ }
+}
+
+// MaxWaitTime sets the max wait time to sleep between requests
+func MaxWaitTime(value time.Duration) Option {
+ return func(o *Options) {
+ o.maxWaitTime = value
+ }
+}
+
+// RetryConditions sets the conditions that will be checked for retry.
+func RetryConditions(conditions []RetryConditionFunc) Option {
+ return func(o *Options) {
+ o.retryConditions = conditions
+ }
+}
+
+// RetryHooks sets the hooks that will be executed after each retry
+func RetryHooks(hooks []OnRetryFunc) Option {
+ return func(o *Options) {
+ o.retryHooks = hooks
+ }
+}
+
+// ResetMultipartReaders sets a boolean value which, when true, causes all multipart
+// file readers to be seeked back to the start, if they implement io.ReadSeeker.
+func ResetMultipartReaders(value bool) Option {
+ return func(o *Options) {
+ o.resetReaders = value
+ }
+}
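+
+// Example (an illustrative sketch, mirroring how Request.Execute drives Backoff;
+// doRequest is a placeholder): the functional options configure a single Options
+// value before the retry loop runs.
+//
+//	err := Backoff(
+//		func() (*Response, error) { return doRequest() }, // doRequest is a placeholder
+//		Retries(3),
+//		WaitTime(200*time.Millisecond),
+//		MaxWaitTime(2*time.Second),
+//		RetryConditions([]RetryConditionFunc{
+//			func(r *Response, err error) bool { return err != nil },
+//		}),
+//	)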
+
+// Backoff retries the operation with an increasing timeout duration up to a maximum number of
+// retries (default is 3 attempts; override with the Retries(n) option).
+func Backoff(operation func() (*Response, error), options ...Option) error {
+ // Defaults
+ opts := Options{
+ maxRetries: defaultMaxRetries,
+ waitTime: defaultWaitTime,
+ maxWaitTime: defaultMaxWaitTime,
+ retryConditions: []RetryConditionFunc{},
+ }
+
+ for _, o := range options {
+ o(&opts)
+ }
+
+ var (
+ resp *Response
+ err error
+ )
+
+ for attempt := 0; attempt <= opts.maxRetries; attempt++ {
+ resp, err = operation()
+ ctx := context.Background()
+ if resp != nil && resp.Request.ctx != nil {
+ ctx = resp.Request.ctx
+ }
+ if ctx.Err() != nil {
+ return err
+ }
+
+ err1 := unwrapNoRetryErr(err) // raw error, it used for return users callback.
+ needsRetry := err != nil && err == err1 // retry on a few operation errors by default
+
+ for _, condition := range opts.retryConditions {
+ needsRetry = condition(resp, err1)
+ if needsRetry {
+ break
+ }
+ }
+
+ if !needsRetry {
+ return err
+ }
+
+ if opts.resetReaders {
+ if err := resetFileReaders(resp.Request.multipartFiles); err != nil {
+ return err
+ }
+ }
+
+ for _, hook := range opts.retryHooks {
+ hook(resp, err)
+ }
+
+ // Don't need to wait when no retries left.
+ // Still run retry hooks even on last retry to keep compatibility.
+ if attempt == opts.maxRetries {
+ return err
+ }
+
+ waitTime, err2 := sleepDuration(resp, opts.waitTime, opts.maxWaitTime, attempt)
+ if err2 != nil {
+ if err == nil {
+ err = err2
+ }
+ return err
+ }
+
+ select {
+ case <-time.After(waitTime):
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
+ return err
+}
+
+func sleepDuration(resp *Response, min, max time.Duration, attempt int) (time.Duration, error) {
+ const maxInt = 1<<31 - 1 // max int for arch 386
+ if max < 0 {
+ max = maxInt
+ }
+ if resp == nil {
+ return jitterBackoff(min, max, attempt), nil
+ }
+
+ retryAfterFunc := resp.Request.client.RetryAfter
+
+ // Check for custom callback
+ if retryAfterFunc == nil {
+ return jitterBackoff(min, max, attempt), nil
+ }
+
+ result, err := retryAfterFunc(resp.Request.client, resp)
+ if err != nil {
+ return 0, err // i.e. 'API quota exceeded'
+ }
+ if result == 0 {
+ return jitterBackoff(min, max, attempt), nil
+ }
+ if result < 0 || max < result {
+ result = max
+ }
+ if result < min {
+ result = min
+ }
+ return result, nil
+}
+
+// Return capped exponential backoff with jitter
+// http://www.awsarchitectureblog.com/2015/03/backoff.html
+func jitterBackoff(min, max time.Duration, attempt int) time.Duration {
+ base := float64(min)
+ capLevel := float64(max)
+
+ temp := math.Min(capLevel, base*math.Exp2(float64(attempt)))
+ ri := time.Duration(temp / 2)
+ if ri == 0 {
+ ri = time.Nanosecond
+ }
+ result := randDuration(ri)
+
+ if result < min {
+ result = min
+ }
+
+ return result
+}
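+
+// Worked example (illustrative numbers): with min=100ms, max=2s and attempt=3,
+// temp = min(2s, 100ms*2^3) = 800ms, so ri = 400ms and the jittered result falls
+// roughly in [400ms, 800ms), clamped to at least min.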
+
+var rnd = newRnd()
+var rndMu sync.Mutex
+
+func randDuration(center time.Duration) time.Duration {
+ rndMu.Lock()
+ defer rndMu.Unlock()
+
+ var ri = int64(center)
+ var jitter = rnd.Int63n(ri)
+ return time.Duration(math.Abs(float64(ri + jitter)))
+}
+
+func newRnd() *rand.Rand {
+ var seed = time.Now().UnixNano()
+ var src = rand.NewSource(seed)
+ return rand.New(src)
+}
+
+func resetFileReaders(files []*File) error {
+ for _, f := range files {
+ if rs, ok := f.Reader.(io.ReadSeeker); ok {
+ if _, err := rs.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-resty/resty/v2/trace.go b/vendor/github.com/go-resty/resty/v2/trace.go
new file mode 100644
index 0000000..be7555c
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/trace.go
@@ -0,0 +1,130 @@
+// Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "net/http/httptrace"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// TraceInfo struct
+//_______________________________________________________________________
+
+// TraceInfo struct is used to provide request trace info such as DNS lookup
+// duration, connection obtain duration, server processing duration, etc.
+//
+// Since v2.0.0
+type TraceInfo struct {
+ // DNSLookup is a duration that transport took to perform
+ // DNS lookup.
+ DNSLookup time.Duration
+
+	// ConnTime is the duration it took to obtain a successful connection.
+	ConnTime time.Duration
+
+	// TCPConnTime is the duration it took to obtain the TCP connection.
+	TCPConnTime time.Duration
+
+	// TLSHandshake is the duration of the TLS handshake.
+	TLSHandshake time.Duration
+
+	// ServerTime is the duration the server took to respond with the first byte.
+	ServerTime time.Duration
+
+	// ResponseTime is the duration from the first response byte received from the
+	// server to request completion.
+	ResponseTime time.Duration
+
+	// TotalTime is the duration the request took end-to-end.
+	TotalTime time.Duration
+
+ // IsConnReused is whether this connection has been previously
+ // used for another HTTP request.
+ IsConnReused bool
+
+ // IsConnWasIdle is whether this connection was obtained from an
+ // idle pool.
+ IsConnWasIdle bool
+
+	// ConnIdleTime is the duration for which the connection was previously
+	// idle, if IsConnWasIdle is true.
+ ConnIdleTime time.Duration
+
+ // RequestAttempt is to represent the request attempt made during a Resty
+ // request execution flow, including retry count.
+ RequestAttempt int
+
+ // RemoteAddr returns the remote network address.
+ RemoteAddr net.Addr
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// ClientTrace struct and its methods
+//_______________________________________________________________________
+
+// clientTrace struct maps the `httptrace.ClientTrace` hooks into fields
+// with the same naming for easy understanding, plus additional insights into
+// the request.
+type clientTrace struct {
+ getConn time.Time
+ dnsStart time.Time
+ dnsDone time.Time
+ connectDone time.Time
+ tlsHandshakeStart time.Time
+ tlsHandshakeDone time.Time
+ gotConn time.Time
+ gotFirstResponseByte time.Time
+ endTime time.Time
+ gotConnInfo httptrace.GotConnInfo
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Trace unexported methods
+//_______________________________________________________________________
+
+func (t *clientTrace) createContext(ctx context.Context) context.Context {
+ return httptrace.WithClientTrace(
+ ctx,
+ &httptrace.ClientTrace{
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ t.dnsStart = time.Now()
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ t.dnsDone = time.Now()
+ },
+ ConnectStart: func(_, _ string) {
+ if t.dnsDone.IsZero() {
+ t.dnsDone = time.Now()
+ }
+ if t.dnsStart.IsZero() {
+ t.dnsStart = t.dnsDone
+ }
+ },
+ ConnectDone: func(net, addr string, err error) {
+ t.connectDone = time.Now()
+ },
+ GetConn: func(_ string) {
+ t.getConn = time.Now()
+ },
+ GotConn: func(ci httptrace.GotConnInfo) {
+ t.gotConn = time.Now()
+ t.gotConnInfo = ci
+ },
+ GotFirstResponseByte: func() {
+ t.gotFirstResponseByte = time.Now()
+ },
+ TLSHandshakeStart: func() {
+ t.tlsHandshakeStart = time.Now()
+ },
+ TLSHandshakeDone: func(_ tls.ConnectionState, _ error) {
+ t.tlsHandshakeDone = time.Now()
+ },
+ },
+ )
+}
diff --git a/vendor/github.com/go-resty/resty/v2/transport.go b/vendor/github.com/go-resty/resty/v2/transport.go
new file mode 100644
index 0000000..191cd51
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/transport.go
@@ -0,0 +1,36 @@
+//go:build go1.13
+// +build go1.13
+
+// Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "net"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func createTransport(localAddr net.Addr) *http.Transport {
+ dialer := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+ if localAddr != nil {
+ dialer.LocalAddr = localAddr
+ }
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: transportDialContext(dialer),
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
+ }
+}
diff --git a/vendor/github.com/go-resty/resty/v2/transport112.go b/vendor/github.com/go-resty/resty/v2/transport112.go
new file mode 100644
index 0000000..d4aa417
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/transport112.go
@@ -0,0 +1,35 @@
+//go:build !go1.13
+// +build !go1.13
+
+// Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "net"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func createTransport(localAddr net.Addr) *http.Transport {
+ dialer := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+ if localAddr != nil {
+ dialer.LocalAddr = localAddr
+ }
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: dialer.DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
+ }
+}
diff --git a/vendor/github.com/go-resty/resty/v2/transport_js.go b/vendor/github.com/go-resty/resty/v2/transport_js.go
new file mode 100644
index 0000000..6227aa9
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/transport_js.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js && wasm
+// +build js,wasm
+
+package resty
+
+import (
+ "context"
+ "net"
+)
+
+func transportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) {
+ return nil
+}
diff --git a/vendor/github.com/go-resty/resty/v2/transport_other.go b/vendor/github.com/go-resty/resty/v2/transport_other.go
new file mode 100644
index 0000000..73553c3
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/transport_other.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !(js && wasm)
+// +build !js !wasm
+
+package resty
+
+import (
+ "context"
+ "net"
+)
+
+func transportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) {
+ return dialer.DialContext
+}
diff --git a/vendor/github.com/go-resty/resty/v2/util.go b/vendor/github.com/go-resty/resty/v2/util.go
new file mode 100644
index 0000000..3279116
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/util.go
@@ -0,0 +1,402 @@
+// Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Logger interface
+//_______________________________________________________________________
+
+// Logger interface abstracts the logging from Resty and gives Resty users
+// control over the choice of logger.
+type Logger interface {
+ Errorf(format string, v ...interface{})
+ Warnf(format string, v ...interface{})
+ Debugf(format string, v ...interface{})
+}
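+
+// Example (an illustrative sketch): any type with Errorf/Warnf/Debugf satisfies the
+// interface, so an application logger can be plugged in via Client.SetLogger.
+//
+//	type appLogger struct{ l *log.Logger }
+//
+//	func (a appLogger) Errorf(format string, v ...interface{}) { a.l.Printf("ERROR "+format, v...) }
+//	func (a appLogger) Warnf(format string, v ...interface{})  { a.l.Printf("WARN "+format, v...) }
+//	func (a appLogger) Debugf(format string, v ...interface{}) { a.l.Printf("DEBUG "+format, v...) }
+//
+//	client.SetLogger(appLogger{l: log.New(os.Stderr, "", log.LstdFlags)})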
+
+func createLogger() *logger {
+ l := &logger{l: log.New(os.Stderr, "", log.Ldate|log.Lmicroseconds)}
+ return l
+}
+
+var _ Logger = (*logger)(nil)
+
+type logger struct {
+ l *log.Logger
+}
+
+func (l *logger) Errorf(format string, v ...interface{}) {
+ l.output("ERROR RESTY "+format, v...)
+}
+
+func (l *logger) Warnf(format string, v ...interface{}) {
+ l.output("WARN RESTY "+format, v...)
+}
+
+func (l *logger) Debugf(format string, v ...interface{}) {
+ l.output("DEBUG RESTY "+format, v...)
+}
+
+func (l *logger) output(format string, v ...interface{}) {
+ if len(v) == 0 {
+ l.l.Print(format)
+ return
+ }
+ l.l.Printf(format, v...)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Rate Limiter interface
+//_______________________________________________________________________
+
+type RateLimiter interface {
+ Allow() bool
+}
+
+var ErrRateLimitExceeded = errors.New("rate limit exceeded")
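+
+// Example (an illustrative sketch; assumes the client exposes SetRateLimiter and
+// uses golang.org/x/time/rate): any type with an Allow() bool method satisfies
+// the RateLimiter interface.
+//
+//	limiter := rate.NewLimiter(rate.Limit(10), 1) // roughly 10 requests per second
+//	client.SetRateLimiter(limiter)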
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Helper methods
+//_______________________________________________________________________
+
+// IsStringEmpty method tells whether the given string is empty or not.
+func IsStringEmpty(str string) bool {
+ return len(strings.TrimSpace(str)) == 0
+}
+
+// DetectContentType method is used to figure out the `Request.Body` content type for the request header.
+func DetectContentType(body interface{}) string {
+ contentType := plainTextType
+ kind := kindOf(body)
+ switch kind {
+ case reflect.Struct, reflect.Map:
+ contentType = jsonContentType
+ case reflect.String:
+ contentType = plainTextType
+ default:
+ if b, ok := body.([]byte); ok {
+ contentType = http.DetectContentType(b)
+ } else if kind == reflect.Slice {
+ contentType = jsonContentType
+ }
+ }
+
+ return contentType
+}
+
+// IsJSONType method checks whether the given content type is JSON or not.
+func IsJSONType(ct string) bool {
+ return jsonCheck.MatchString(ct)
+}
+
+// IsXMLType method checks whether the given content type is XML or not.
+func IsXMLType(ct string) bool {
+ return xmlCheck.MatchString(ct)
+}
+
+// Unmarshalc unmarshals content into an object from JSON or XML.
+func Unmarshalc(c *Client, ct string, b []byte, d interface{}) (err error) {
+ if IsJSONType(ct) {
+ err = c.JSONUnmarshal(b, d)
+ } else if IsXMLType(ct) {
+ err = c.XMLUnmarshal(b, d)
+ }
+
+ return
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// RequestLog and ResponseLog type
+//_______________________________________________________________________
+
+// RequestLog struct is used to collect information from the resty request
+// instance for debug logging. It is sent to the request log callback before resty
+// actually logs the information.
+type RequestLog struct {
+ Header http.Header
+ Body string
+}
+
+// ResponseLog struct is used to collect information from the resty response
+// instance for debug logging. It is sent to the response log callback before resty
+// actually logs the information.
+type ResponseLog struct {
+ Header http.Header
+ Body string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Unexported methods
+//_______________________________________________________________________
+
+// way to disable the HTML escape as opt-in
+func jsonMarshal(c *Client, r *Request, d interface{}) (*bytes.Buffer, error) {
+ if !r.jsonEscapeHTML || !c.jsonEscapeHTML {
+ return noescapeJSONMarshal(d)
+ }
+
+ data, err := c.JSONMarshal(d)
+ if err != nil {
+ return nil, err
+ }
+
+ buf := acquireBuffer()
+ _, _ = buf.Write(data)
+ return buf, nil
+}
+
+func firstNonEmpty(v ...string) string {
+ for _, s := range v {
+ if !IsStringEmpty(s) {
+ return s
+ }
+ }
+ return ""
+}
+
+var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
+
+func escapeQuotes(s string) string {
+ return quoteEscaper.Replace(s)
+}
+
+func createMultipartHeader(param, fileName, contentType string) textproto.MIMEHeader {
+ hdr := make(textproto.MIMEHeader)
+
+ var contentDispositionValue string
+ if IsStringEmpty(fileName) {
+ contentDispositionValue = fmt.Sprintf(`form-data; name="%s"`, param)
+ } else {
+ contentDispositionValue = fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
+ param, escapeQuotes(fileName))
+ }
+ hdr.Set("Content-Disposition", contentDispositionValue)
+
+ if !IsStringEmpty(contentType) {
+ hdr.Set(hdrContentTypeKey, contentType)
+ }
+ return hdr
+}
+
+func addMultipartFormField(w *multipart.Writer, mf *MultipartField) error {
+ partWriter, err := w.CreatePart(createMultipartHeader(mf.Param, mf.FileName, mf.ContentType))
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(partWriter, mf.Reader)
+ return err
+}
+
+func writeMultipartFormFile(w *multipart.Writer, fieldName, fileName string, r io.Reader) error {
+ // Auto detect actual multipart content type
+ cbuf := make([]byte, 512)
+ size, err := r.Read(cbuf)
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ partWriter, err := w.CreatePart(createMultipartHeader(fieldName, fileName, http.DetectContentType(cbuf)))
+ if err != nil {
+ return err
+ }
+
+ if _, err = partWriter.Write(cbuf[:size]); err != nil {
+ return err
+ }
+
+ _, err = io.Copy(partWriter, r)
+ return err
+}
+
+func addFile(w *multipart.Writer, fieldName, path string) error {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer closeq(file)
+ return writeMultipartFormFile(w, fieldName, filepath.Base(path), file)
+}
+
+func addFileReader(w *multipart.Writer, f *File) error {
+ return writeMultipartFormFile(w, f.ParamName, f.Name, f.Reader)
+}
+
+func getPointer(v interface{}) interface{} {
+ vv := valueOf(v)
+ if vv.Kind() == reflect.Ptr {
+ return v
+ }
+ return reflect.New(vv.Type()).Interface()
+}
+
+func isPayloadSupported(m string, allowMethodGet bool) bool {
+ return !(m == MethodHead || m == MethodOptions || (m == MethodGet && !allowMethodGet))
+}
+
+func typeOf(i interface{}) reflect.Type {
+ return indirect(valueOf(i)).Type()
+}
+
+func valueOf(i interface{}) reflect.Value {
+ return reflect.ValueOf(i)
+}
+
+func indirect(v reflect.Value) reflect.Value {
+ return reflect.Indirect(v)
+}
+
+func kindOf(v interface{}) reflect.Kind {
+ return typeOf(v).Kind()
+}
+
+func createDirectory(dir string) (err error) {
+ if _, err = os.Stat(dir); err != nil {
+ if os.IsNotExist(err) {
+ if err = os.MkdirAll(dir, 0755); err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+func canJSONMarshal(contentType string, kind reflect.Kind) bool {
+ return IsJSONType(contentType) && (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice)
+}
+
+func functionName(i interface{}) string {
+ return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
+}
+
+func acquireBuffer() *bytes.Buffer {
+ return bufPool.Get().(*bytes.Buffer)
+}
+
+func releaseBuffer(buf *bytes.Buffer) {
+ if buf != nil {
+ buf.Reset()
+ bufPool.Put(buf)
+ }
+}
+
+// requestBodyReleaser wraps the request's body and implements a custom Close for it.
+// The Close method closes the original body and releases the request body buffer back to the sync.Pool.
+type requestBodyReleaser struct {
+ releaseOnce sync.Once
+ reqBuf *bytes.Buffer
+ io.ReadCloser
+}
+
+func newRequestBodyReleaser(respBody io.ReadCloser, reqBuf *bytes.Buffer) io.ReadCloser {
+ if reqBuf == nil {
+ return respBody
+ }
+
+ return &requestBodyReleaser{
+ reqBuf: reqBuf,
+ ReadCloser: respBody,
+ }
+}
+
+func (rr *requestBodyReleaser) Close() error {
+ err := rr.ReadCloser.Close()
+ rr.releaseOnce.Do(func() {
+ releaseBuffer(rr.reqBuf)
+ })
+
+ return err
+}
+
+func closeq(v interface{}) {
+ if c, ok := v.(io.Closer); ok {
+ silently(c.Close())
+ }
+}
+
+func silently(_ ...interface{}) {}
+
+func composeHeaders(c *Client, r *Request, hdrs http.Header) string {
+ str := make([]string, 0, len(hdrs))
+ for _, k := range sortHeaderKeys(hdrs) {
+ var v string
+ if k == "Cookie" {
+ cv := strings.TrimSpace(strings.Join(hdrs[k], ", "))
+ if c.GetClient().Jar != nil {
+ for _, c := range c.GetClient().Jar.Cookies(r.RawRequest.URL) {
+ if cv != "" {
+ cv = cv + "; " + c.String()
+ } else {
+ cv = c.String()
+ }
+ }
+ }
+ v = strings.TrimSpace(fmt.Sprintf("%25s: %s", k, cv))
+ } else {
+ v = strings.TrimSpace(fmt.Sprintf("%25s: %s", k, strings.Join(hdrs[k], ", ")))
+ }
+ if v != "" {
+ str = append(str, "\t"+v)
+ }
+ }
+ return strings.Join(str, "\n")
+}
+
+func sortHeaderKeys(hdrs http.Header) []string {
+ keys := make([]string, 0, len(hdrs))
+ for key := range hdrs {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+func copyHeaders(hdrs http.Header) http.Header {
+ nh := http.Header{}
+ for k, v := range hdrs {
+ nh[k] = v
+ }
+ return nh
+}
+
+type noRetryErr struct {
+ err error
+}
+
+func (e *noRetryErr) Error() string {
+ return e.err.Error()
+}
+
+func wrapNoRetryErr(err error) error {
+ if err != nil {
+ err = &noRetryErr{err: err}
+ }
+ return err
+}
+
+func unwrapNoRetryErr(err error) error {
+ if e, ok := err.(*noRetryErr); ok {
+ err = e.err
+ }
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore
new file mode 100644
index 0000000..2de28da
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/.gitignore
@@ -0,0 +1,9 @@
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+Icon?
+ehthumbs.db
+Thumbs.db
+.idea
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
new file mode 100644
index 0000000..fb1478c
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -0,0 +1,126 @@
+# This is the official list of Go-MySQL-Driver authors for copyright purposes.
+
+# If you are submitting a patch, please add your name or the name of the
+# organization which holds the copyright to this list in alphabetical order.
+
+# Names should be added to this file as
+# Name
+# The email address is not required for organizations.
+# Please keep the list sorted.
+
+
+# Individual Persons
+
+Aaron Hopkins
+Achille Roussel
+Alex Snast
+Alexey Palazhchenko
+Andrew Reid
+Animesh Ray
+Arne Hormann
+Ariel Mashraki
+Asta Xie
+Bulat Gaifullin
+Caine Jette
+Carlos Nieto
+Chris Kirkland
+Chris Moos
+Craig Wilson
+Daniel Montoya
+Daniel Nichter
+Daniël van Eeden
+Dave Protasowski
+DisposaBoy
+Egor Smolyakov
+Erwan Martin
+Evan Shaw
+Frederick Mayle
+Gustavo Kristic
+Hajime Nakagami
+Hanno Braun
+Henri Yandell
+Hirotaka Yamamoto
+Huyiguang
+ICHINOSE Shogo
+Ilia Cimpoes
+INADA Naoki
+Jacek Szwec
+James Harr
+Janek Vedock
+Jeff Hodges
+Jeffrey Charles
+Jerome Meyer
+Jiajia Zhong
+Jian Zhen
+Joshua Prunier
+Julien Lefevre
+Julien Schmidt
+Justin Li
+Justin Nuß
+Kamil Dziedzic
+Kei Kamikawa
+Kevin Malachowski
+Kieron Woodhouse
+Lance Tian
+Lennart Rudolph
+Leonardo YongUk Kim
+Linh Tran Tuan
+Lion Yang
+Luca Looz
+Lucas Liu
+Lunny Xiao
+Luke Scott
+Maciej Zimnoch
+Michael Woolnough
+Nathanial Murphy
+Nicola Peduzzi
+Olivier Mengué
+oscarzhao
+Paul Bonser
+Peter Schultz
+Phil Porada
+Rebecca Chin
+Reed Allman
+Richard Wilkes
+Robert Russell
+Runrioter Wung
+Samantha Frank
+Santhosh Kumar Tekuri
+Sho Iizuka
+Sho Ikeda
+Shuode Li
+Simon J Mudd
+Soroush Pour
+Stan Putrya
+Stanley Gunawan
+Steven Hartland
+Tan Jinhua <312841925 at qq.com>
+Thomas Wodarek
+Tim Ruffles
+Tom Jenkinson
+Vladimir Kovpak
+Vladyslav Zhelezniak
+Xiangyu Hu
+Xiaobing Jiang
+Xiuming Chen
+Xuehong Chan
+Zhenye Xie
+Zhixin Wen
+Ziheng Lyu
+
+# Organizations
+
+Barracuda Networks, Inc.
+Counting Ltd.
+DigitalOcean Inc.
+dyves labs AG
+Facebook Inc.
+GitHub Inc.
+Google Inc.
+InfoSum Ltd.
+Keybase Inc.
+Multiplay Ltd.
+Percona LLC
+Pivotal Inc.
+Stripe Inc.
+Zendesk Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
new file mode 100644
index 0000000..5166e4a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -0,0 +1,266 @@
+## Version 1.7.1 (2023-04-25)
+
+Changes:
+
+ - bump actions/checkout@v3 and actions/setup-go@v3 (#1375)
+ - Add go1.20 and mariadb10.11 to the testing matrix (#1403)
+ - Increase default maxAllowedPacket size. (#1411)
+
+Bugfixes:
+
+ - Use SET syntax as specified in the MySQL documentation (#1402)
+
+
+## Version 1.7 (2022-11-29)
+
+Changes:
+
+ - Drop support of Go 1.12 (#1211)
+ - Refactoring `(*textRows).readRow` in a more clear way (#1230)
+ - util: Reduce boundary check in escape functions. (#1316)
+ - enhancement for mysqlConn handleAuthResult (#1250)
+
+New Features:
+
+ - support Is comparison on MySQLError (#1210)
+ - return unsigned in database type name when necessary (#1238)
+ - Add API to express like a --ssl-mode=PREFERRED MySQL client (#1370)
+ - Add SQLState to MySQLError (#1321)
+
+Bugfixes:
+
+ - Fix parsing 0 year. (#1257)
+
+
+## Version 1.6 (2021-04-01)
+
+Changes:
+
+ - Migrate the CI service from travis-ci to GitHub Actions (#1176, #1183, #1190)
+ - `NullTime` is deprecated (#960, #1144)
+ - Reduce allocations when building SET command (#1111)
+ - Performance improvement for time formatting (#1118)
+ - Performance improvement for time parsing (#1098, #1113)
+
+New Features:
+
+ - Implement `driver.Validator` interface (#1106, #1174)
+ - Support returning `uint64` from `Valuer` in `ConvertValue` (#1143)
+ - Add `json.RawMessage` for converter and prepared statement (#1059)
+ - Interpolate `json.RawMessage` as `string` (#1058)
+ - Implements `CheckNamedValue` (#1090)
+
+Bugfixes:
+
+ - Stop rounding times (#1121, #1172)
+ - Put zero filler into the SSL handshake packet (#1066)
+ - Fix checking cancelled connections back into the connection pool (#1095)
+ - Fix remove last 0 byte for mysql_old_password when password is empty (#1133)
+
+
+## Version 1.5 (2020-01-07)
+
+Changes:
+
+ - Dropped support Go 1.9 and lower (#823, #829, #886, #1016, #1017)
+ - Improve buffer handling (#890)
+ - Document potentially insecure TLS configs (#901)
+ - Use a double-buffering scheme to prevent data races (#943)
+ - Pass uint64 values without converting them to string (#838, #955)
+ - Update collations and make utf8mb4 default (#877, #1054)
+ - Make NullTime compatible with sql.NullTime in Go 1.13+ (#995)
+ - Removed CloudSQL support (#993, #1007)
+ - Add Go Module support (#1003)
+
+New Features:
+
+ - Implement support of optional TLS (#900)
+ - Check connection liveness (#934, #964, #997, #1048, #1051, #1052)
+ - Implement Connector Interface (#941, #958, #1020, #1035)
+
+Bugfixes:
+
+ - Mark connections as bad on error during ping (#875)
+ - Mark connections as bad on error during dial (#867)
+ - Fix connection leak caused by rapid context cancellation (#1024)
+ - Mark connections as bad on error during Conn.Prepare (#1030)
+
+
+## Version 1.4.1 (2018-11-14)
+
+Bugfixes:
+
+ - Fix TIME format for binary columns (#818)
+ - Fix handling of empty auth plugin names (#835)
+ - Fix caching_sha2_password with empty password (#826)
+ - Fix canceled context broke mysqlConn (#862)
+ - Fix OldAuthSwitchRequest support (#870)
+ - Fix Auth Response packet for cleartext password (#887)
+
+## Version 1.4 (2018-06-03)
+
+Changes:
+
+ - Documentation fixes (#530, #535, #567)
+ - Refactoring (#575, #579, #580, #581, #603, #615, #704)
+ - Cache column names (#444)
+ - Sort the DSN parameters in DSNs generated from a config (#637)
+ - Allow native password authentication by default (#644)
+ - Use the default port if it is missing in the DSN (#668)
+ - Removed the `strict` mode (#676)
+ - Do not query `max_allowed_packet` by default (#680)
+ - Dropped support Go 1.6 and lower (#696)
+ - Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
+ - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
+ - Improved the compatibility of the authentication system (#807)
+
+New Features:
+
+ - Multi-Results support (#537)
+ - `rejectReadOnly` DSN option (#604)
+ - `context.Context` support (#608, #612, #627, #761)
+ - Transaction isolation level support (#619, #744)
+ - Read-Only transactions support (#618, #634)
+ - `NewConfig` function which initializes a config with default values (#679)
+ - Implemented the `ColumnType` interfaces (#667, #724)
+ - Support for custom string types in `ConvertValue` (#623)
+ - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
+ - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
+ - Implemented `driver.SessionResetter` (#779)
+ - `sha256_password` authentication plugin support (#808)
+
+Bugfixes:
+
+ - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
+ - Fixed LOAD LOCAL DATA INFILE for empty files (#590)
+ - Removed columns definition cache since it sometimes cached invalid data (#592)
+ - Don't mutate registered TLS configs (#600)
+ - Make RegisterTLSConfig concurrency-safe (#613)
+ - Handle missing auth data in the handshake packet correctly (#646)
+ - Do not retry queries when data was written to avoid data corruption (#302, #736)
+ - Cache the connection pointer for error handling before invalidating it (#678)
+ - Fixed imports for appengine/cloudsql (#700)
+ - Fix sending STMT_LONG_DATA for 0 byte data (#734)
+ - Set correct capacity for []bytes read from length-encoded strings (#766)
+ - Make RegisterDial concurrency-safe (#773)
+
+
+## Version 1.3 (2016-12-01)
+
+Changes:
+
+ - Go 1.1 is no longer supported
+ - Use decimals fields in MySQL to format time types (#249)
+ - Buffer optimizations (#269)
+ - TLS ServerName defaults to the host (#283)
+ - Refactoring (#400, #410, #437)
+ - Adjusted documentation for second generation CloudSQL (#485)
+ - Documented DSN system var quoting rules (#502)
+ - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
+
+New Features:
+
+ - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
+ - Support for returning table alias on Columns() (#289, #359, #382)
+ - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Support for uint64 parameters with high bit set (#332, #345)
+ - Cleartext authentication plugin support (#327)
+ - Exported ParseDSN function and the Config struct (#403, #419, #429)
+ - Read / Write timeouts (#401)
+ - Support for JSON field type (#414)
+ - Support for multi-statements and multi-results (#411, #431)
+ - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
+ - Native password authentication plugin support (#494, #524)
+
+Bugfixes:
+
+ - Fixed handling of queries without columns and rows (#255)
+ - Fixed a panic when SetKeepAlive() failed (#298)
+ - Handle ERR packets while reading rows (#321)
+ - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
+ - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
+ - Actually zero out bytes in handshake response (#378)
+ - Fixed race condition in registering LOAD DATA INFILE handler (#383)
+ - Fixed tests with MySQL 5.7.9+ (#380)
+ - QueryUnescape TLS config names (#397)
+ - Fixed "broken pipe" error by writing to closed socket (#390)
+ - Fixed LOAD LOCAL DATA INFILE buffering (#424)
+ - Fixed parsing of floats into float64 when placeholders are used (#434)
+ - Fixed DSN tests with Go 1.7+ (#459)
+ - Handle ERR packets while waiting for EOF (#473)
+ - Invalidate connection on error while discarding additional results (#513)
+ - Allow terminating packets of length 0 (#516)
+
+
+## Version 1.2 (2014-06-03)
+
+Changes:
+
+ - We switched back to a "rolling release". `go get` installs the current master branch again
+ - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
+ - Exported errors to allow easy checking from application code
+ - Enabled TCP Keepalives on TCP connections
+ - Optimized INFILE handling (better buffer size calculation, lazy init, ...)
+ - The DSN parser also checks for a missing separating slash
+ - Faster binary date / datetime to string formatting
+ - Also exported the MySQLWarning type
+ - mysqlConn.Close returns the first error encountered instead of ignoring all errors
+ - writePacket() automatically writes the packet size to the header
+ - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
+
+New Features:
+
+ - `RegisterDial` allows the usage of a custom dial function to establish the network connection
+ - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
+ - Logging of critical errors is configurable with `SetLogger`
+ - Google CloudSQL support
+
+Bugfixes:
+
+ - Allow more than 32 parameters in prepared statements
+ - Various old_password fixes
+ - Fixed TestConcurrent test to pass Go's race detection
+ - Fixed appendLengthEncodedInteger for large numbers
+ - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
+
+
+## Version 1.1 (2013-11-02)
+
+Changes:
+
+ - Go-MySQL-Driver now requires Go 1.1
+ - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
+ - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
+ - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
+ - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
+ - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
+ - Optimized the buffer for reading
+ - stmt.Query now caches column metadata
+ - New Logo
+ - Changed the copyright header to include all contributors
+ - Improved the LOAD INFILE documentation
+ - The driver struct is now exported to make the driver directly accessible
+ - Refactored the driver tests
+ - Added more benchmarks and moved all to a separate file
+ - Other small refactoring
+
+New Features:
+
+ - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
+ - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
+ - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
+
+Bugfixes:
+
+ - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
+ - Convert to DB timezone when inserting `time.Time`
+ - Splitted packets (more than 16MB) are now merged correctly
+ - Fixed false positive `io.EOF` errors when the data was fully read
+ - Avoid panics on reuse of closed connections
+ - Fixed empty string producing false nil values
+ - Fixed sign byte for positive TIME fields
+
+
+## Version 1.0 (2013-05-14)
+
+Initial Release
diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE
new file mode 100644
index 0000000..14e2f77
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
new file mode 100644
index 0000000..3b5d229
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -0,0 +1,531 @@
+# Go-MySQL-Driver
+
+A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
+
+![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
+
+---------------------------------------
+ * [Features](#features)
+ * [Requirements](#requirements)
+ * [Installation](#installation)
+ * [Usage](#usage)
+ * [DSN (Data Source Name)](#dsn-data-source-name)
+ * [Password](#password)
+ * [Protocol](#protocol)
+ * [Address](#address)
+ * [Parameters](#parameters)
+ * [Examples](#examples)
+ * [Connection pool and timeouts](#connection-pool-and-timeouts)
+ * [context.Context Support](#contextcontext-support)
+ * [ColumnType Support](#columntype-support)
+ * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
+ * [time.Time support](#timetime-support)
+ * [Unicode support](#unicode-support)
+ * [Testing / Development](#testing--development)
+ * [License](#license)
+
+---------------------------------------
+
+## Features
+ * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
+ * Native Go implementation. No C-bindings, just pure Go
+ * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
+ * Automatic handling of broken connections
+ * Automatic Connection Pooling *(by database/sql package)*
+ * Supports queries larger than 16MB
+ * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
+ * Intelligent `LONG DATA` handling in prepared statements
+ * Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
+ * Optional `time.Time` parsing
+ * Optional placeholder interpolation
+
+## Requirements
+ * Go 1.13 or higher. We aim to support the 3 latest versions of Go.
+ * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+---------------------------------------
+
+## Installation
+Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from a shell:
+```bash
+$ go get -u github.com/go-sql-driver/mysql
+```
+Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
+
+## Usage
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](https://golang.org/pkg/database/sql/) API.
+
+Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
+
+```go
+import (
+ "database/sql"
+ "time"
+
+ _ "github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+db, err := sql.Open("mysql", "user:password@/dbname")
+if err != nil {
+ panic(err)
+}
+// See "Important settings" section.
+db.SetConnMaxLifetime(time.Minute * 3)
+db.SetMaxOpenConns(10)
+db.SetMaxIdleConns(10)
+```
+
+[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
+
+### Important settings
+
+`db.SetConnMaxLifetime()` is required to ensure connections are closed by the driver safely before they are closed by the MySQL server, OS, or other middleware. Since some middleware closes idle connections after 5 minutes, we recommend a timeout shorter than 5 minutes. This setting also helps with load balancing and changing system variables.
+
+`db.SetMaxOpenConns()` is highly recommended to limit the number of connections used by the application. There is no recommended limit because it depends on the application and the MySQL server.
+
+`db.SetMaxIdleConns()` is recommended to be set to the same value as `db.SetMaxOpenConns()`. When it is smaller than `SetMaxOpenConns()`, connections can be opened and closed much more frequently than you expect. Idle connections can be closed by `db.SetConnMaxLifetime()`. If you want to close idle connections more rapidly, you can use `db.SetConnMaxIdleTime()` since Go 1.15.
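+
+As a minimal sketch (the concrete values below are illustrative placeholders, not recommendations), these settings can be combined like this:
+
+```go
+db.SetConnMaxLifetime(3 * time.Minute) // close connections before server/middleware timeouts
+db.SetMaxOpenConns(10)                 // cap concurrent connections to the server
+db.SetMaxIdleConns(10)                 // keep the idle pool the same size as the open cap
+db.SetConnMaxIdleTime(time.Minute)     // Go 1.15+: close idle connections sooner
+```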
+
+
+### DSN (Data Source Name)
+
+The Data Source Name has a common format, like the one used by [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without a type prefix (optional parts are marked by square brackets):
+```
+[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]
+```
+
+A DSN in its fullest form:
+```
+username:password@protocol(address)/dbname?param=value
+```
+
+Except for the database name, all values are optional. So the minimal DSN is:
+```
+/dbname
+```
+
+If you do not want to preselect a database, leave `dbname` empty:
+```
+/
+```
+This has the same effect as an empty DSN string:
+```
+
+```
+
+Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
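+
+A minimal sketch of building a DSN with `Config` and `FormatDSN` instead of concatenating strings (the credentials, address, and database name are placeholders):
+
+```go
+import (
+	"database/sql"
+	"log"
+
+	"github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+cfg := mysql.NewConfig()
+cfg.User = "user"
+cfg.Passwd = "password"
+cfg.Net = "tcp"
+cfg.Addr = "127.0.0.1:3306"
+cfg.DBName = "dbname"
+cfg.ParseTime = true
+
+// e.g. "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true"
+dsn := cfg.FormatDSN()
+
+db, err := sql.Open("mysql", dsn)
+if err != nil {
+	log.Fatal(err)
+}
+```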
+
+#### Password
+Passwords can consist of any character. Escaping is **not** necessary.
+
+#### Protocol
+See [net.Dial](https://golang.org/pkg/net/#Dial) for more information about which networks are available.
+In general you should use a Unix domain socket if available, and TCP otherwise, for best performance.
+
+#### Address
+For TCP and UDP networks, addresses have the form `host[:port]`.
+If `port` is omitted, the default port will be used.
+If `host` is a literal IPv6 address, it must be enclosed in square brackets.
+The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
+
+For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
+
+#### Parameters
+*Parameters are case-sensitive!*
+
+Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
+
+##### `allowAllFiles`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
+[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+
+##### `allowCleartextPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowCleartextPasswords=true` allows using the [cleartext client side plugin](https://dev.mysql.com/doc/en/cleartext-pluggable-authentication.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
+
+
+##### `allowFallbackToPlaintext`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowFallbackToPlaintext=true` acts like a `--ssl-mode=PREFERRED` MySQL client as described in [Command Options for Connecting to the Server](https://dev.mysql.com/doc/refman/5.7/en/connection-options.html#option_general_ssl-mode)
+
+##### `allowNativePasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: true
+```
+`allowNativePasswords=false` disallows the usage of the MySQL native password method.
+
+##### `allowOldPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
+
+##### `charset`
+
+```
+Type: string
+Valid Values: <name>
+Default: none
+```
+
+Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables, for example, support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+
+Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
+Unless you need the fallback behavior, please use `collation` instead.
+
+##### `checkConnLiveness`
+
+```
+Type: bool
+Valid Values: true, false
+Default: true
+```
+
+On supported platforms connections retrieved from the connection pool are checked for liveness before using them. If the check fails, the respective connection is marked as bad and the query is retried with another connection.
+`checkConnLiveness=false` disables this liveness check of connections.
+
+##### `collation`
+
+```
+Type: string
+Valid Values: <name>
+Default: utf8mb4_general_ci
+```
+
+Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
+
+A list of valid collations for a server is retrievable with `SHOW COLLATION`.
+
+The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL.
+
+Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
+
+
+##### `clientFoundRows`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
+
+##### `columnsWithAlias`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
+
+```
+SELECT u.id FROM users as u
+```
+
+will return `u.id` instead of just `id` if `columnsWithAlias=true`.
+
+##### `interpolateParams`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of roundtrips, since with `interpolateParams=false` the driver has to prepare a statement, execute it with the given parameters, and close the statement again.
+
+*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are rejected as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
+
+##### `loc`
+
+```
+Type: string
+Valid Values: <escaped name>
+Default: UTC
+```
+
+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
+
+Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
+
+Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+
+##### `maxAllowedPacket`
+```
+Type: decimal number
+Default: 64*1024*1024
+```
+
+Max packet size allowed in bytes. The default value is 64 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
+
+##### `multiStatements`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement.
+
+##### `parseTime`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`.
+A date or datetime value like `0000-00-00 00:00:00` is converted into the zero value of `time.Time`.
+
+
+##### `readTimeout`
+
+```
+Type: duration
+Default: 0
+```
+
+I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+##### `rejectReadOnly`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+
+`rejectReadOnly=true` causes the driver to reject read-only connections. This
+is for a possible race condition during an automatic failover, where the mysql
+client gets connected to a read-only replica after the failover.
+
+Note that this should be a fairly rare case, as an automatic failover normally
+happens when the primary is down, and the race condition shouldn't happen
+unless it comes back up online as soon as the failover is kicked off. On the
+other hand, when this happens, a MySQL application can get stuck on a
+read-only connection until restarted. It is however fairly easy to reproduce,
+for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
+
+If you are not relying on read-only transactions to reject writes that aren't
+supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
+is safer for failovers.
+
+Note that ERROR 1290 can be returned for a `read-only` server and this option will
+cause a retry for that error. However the same error number is used for some
+other cases. You should ensure your application will never cause an ERROR 1290
+except for `read-only` mode when enabling this option.
+
+
+##### `serverPubKey`
+
+```
+Type: string
+Valid Values: <name>
+Default: none
+```
+
+Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
+Public keys are used to transmit encrypted data, e.g. for authentication.
+If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
+
+
+##### `timeout`
+
+```
+Type: duration
+Default: OS default
+```
+
+Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### `tls`
+
+```
+Type: bool / string
+Valid Values: true, false, skip-verify, preferred, <name>
+Default: false
+```
+
+`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
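+
+A hedged sketch of registering a custom TLS config under the name `"custom"` and selecting it with `tls=custom` in the DSN (the CA file path, host, and credentials are placeholders):
+
+```go
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"database/sql"
+	"io/ioutil"
+	"log"
+
+	"github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+rootCertPool := x509.NewCertPool()
+pem, err := ioutil.ReadFile("/path/to/ca-cert.pem")
+if err != nil {
+	log.Fatal(err)
+}
+if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+	log.Fatal("failed to append PEM")
+}
+if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool}); err != nil {
+	log.Fatal(err)
+}
+
+db, err := sql.Open("mysql", "user:password@tcp(db.example.com:3306)/dbname?tls=custom")
+```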
+
+
+##### `writeTimeout`
+
+```
+Type: duration
+Default: 0
+```
+
+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### System Variables
+
+Any other parameters are interpreted as system variables:
+ * `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
+ * `<enum_var>=<value>`: `SET <enum_var>=<value>`
+ * `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
+
+Rules:
+* The values for string variables must be quoted with `'`.
+* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
+ (which implies values of string variables must be wrapped with `%27`).
+
+Examples:
+ * `autocommit=1`: `SET autocommit=1`
+ * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
+ * [`transaction_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation): `SET transaction_isolation='REPEATABLE-READ'`
+
+
+#### Examples
+```
+user@unix(/path/to/socket)/dbname
+```
+
+```
+root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
+```
+
+```
+user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
+```
+
+Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
+```
+user:password@/dbname?sql_mode=TRADITIONAL
+```
+
+TCP via IPv6:
+```
+user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
+```
+
+TCP on a remote host, e.g. Amazon RDS:
+```
+id:password@tcp(your-amazonaws-uri.com:3306)/dbname
+```
+
+Google Cloud SQL on App Engine:
+```
+user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname
+```
+
+TCP using default port (3306) on localhost:
+```
+user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
+```
+
+Use the default protocol (tcp) and host (localhost:3306):
+```
+user:password@/dbname
+```
+
+No Database preselected:
+```
+user:password@/
+```
+
+
+### Connection pool and timeouts
+The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
+
+### `ColumnType` Support
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. Unsigned database type names are returned with the prefix `UNSIGNED ` for `INT`, `TINYINT`, `SMALLINT`, and `BIGINT`.
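+
+A short sketch of inspecting column types at runtime (the query and table are placeholders; `db`, `log`, and `fmt` are assumed to be set up as in the usage example above):
+
+```go
+rows, err := db.Query("SELECT id, name FROM users")
+if err != nil {
+	log.Fatal(err)
+}
+defer rows.Close()
+
+columnTypes, err := rows.ColumnTypes()
+if err != nil {
+	log.Fatal(err)
+}
+for _, ct := range columnTypes {
+	// e.g. "INT", "VARCHAR", "UNSIGNED BIGINT"; Length() is not supported by this driver
+	fmt.Println(ct.Name(), ct.DatabaseTypeName())
+}
+```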
+
+### `context.Context` Support
+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
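+
+A minimal sketch of a per-query timeout via `context.Context` (the query and table are placeholders; `context`, `time`, and `log` are assumed to be imported and `db` set up as in the usage example above):
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+defer cancel()
+
+rows, err := db.QueryContext(ctx, "SELECT id FROM users WHERE active = ?", true)
+if err != nil {
+	// a deadline or cancellation surfaces here (or during rows iteration) as an error
+	log.Fatal(err)
+}
+defer rows.Close()
+```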
+
+
+### `LOAD DATA LOCAL INFILE` support
+For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
+```go
+import "github.com/go-sql-driver/mysql"
+```
+
+Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+
+To use an `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you don't need a handler anymore.
+
+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
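+
+A hedged sketch of the `io.Reader` variant (the handler name, data, and table are placeholders; `db` and `log` are assumed to be set up as in the usage example above, and the handler is registered before issuing the statement):
+
+```go
+import (
+	"io"
+	"strings"
+
+	"github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+mysql.RegisterReaderHandler("data", func() io.Reader {
+	return strings.NewReader("1\talice\n2\tbob\n") // tab-separated rows
+})
+defer mysql.DeregisterReaderHandler("data")
+
+if _, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE example"); err != nil {
+	log.Fatal(err)
+}
+```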
+
+
+### `time.Time` support
+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
+
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+
+**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
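+
+A minimal sketch of scanning a `DATETIME` column into `time.Time` with `parseTime=true` (the DSN, table, and column are placeholders; the imports from the usage example plus `log` are assumed):
+
+```go
+db, err := sql.Open("mysql", "user:password@/dbname?parseTime=true&loc=Local")
+if err != nil {
+	log.Fatal(err)
+}
+defer db.Close()
+
+var createdAt time.Time
+if err := db.QueryRow("SELECT created_at FROM events WHERE id = ?", 1).Scan(&createdAt); err != nil {
+	log.Fatal(err)
+}
+log.Println(createdAt.In(time.Local))
+```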
+
+
+### Unicode support
+Since version 1.5 Go-MySQL-Driver automatically uses the collation `utf8mb4_general_ci` by default.
+
+Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+
+Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is no longer necessary. The [`collation`](#collation) parameter should be preferred for setting a collation / charset other than the default.
+
+See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
+
+## Testing / Development
+To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
+
+Go-MySQL-Driver is not feature-complete yet. Your help is greatly appreciated.
+If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
+
+See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/.github/CONTRIBUTING.md) for details.
+
+---------------------------------------
+
+## License
+Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
+
+Mozilla summarizes the license scope as follows:
+> MPL: The copyleft applies to any files containing MPLed code.
+
+
+That means:
+ * You can **use** the **unchanged** source code both in private and commercially.
+ * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
+ * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
+
+Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
+
+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
+
+![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool.go
new file mode 100644
index 0000000..1b7e19f
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/atomic_bool.go
@@ -0,0 +1,19 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
+//
+// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+//go:build go1.19
+// +build go1.19
+
+package mysql
+
+import "sync/atomic"
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+type atomicBool = atomic.Bool
diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go
new file mode 100644
index 0000000..2e9a7f0
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go
@@ -0,0 +1,47 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
+//
+// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+//go:build !go1.19
+// +build !go1.19
+
+package mysql
+
+import "sync/atomic"
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+// atomicBool is an implementation of atomic.Bool for older versions of Go.
+// It is a wrapper around uint32 for usage as a boolean value with
+// atomic access.
+type atomicBool struct {
+ _ noCopy
+ value uint32
+}
+
+// Load returns whether the current boolean value is true
+func (ab *atomicBool) Load() bool {
+ return atomic.LoadUint32(&ab.value) > 0
+}
+
+// Store sets the value of the bool regardless of the previous value
+func (ab *atomicBool) Store(value bool) {
+ if value {
+ atomic.StoreUint32(&ab.value, 1)
+ } else {
+ atomic.StoreUint32(&ab.value, 0)
+ }
+}
+
+// Swap sets the value of the bool and returns the old value.
+func (ab *atomicBool) Swap(value bool) bool {
+ if value {
+ return atomic.SwapUint32(&ab.value, 1) > 0
+ }
+ return atomic.SwapUint32(&ab.value, 0) > 0
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
new file mode 100644
index 0000000..1ff203e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/auth.go
@@ -0,0 +1,437 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "sync"
+)
+
+// server pub keys registry
+var (
+ serverPubKeyLock sync.RWMutex
+ serverPubKeyRegistry map[string]*rsa.PublicKey
+)
+
+// RegisterServerPubKey registers a server RSA public key which can be used to
+// send data in a secure manner to the server without receiving the public key
+// in a potentially insecure way from the server first.
+// Registered keys can afterwards be used by adding serverPubKey=<name> to the DSN.
+//
+// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
+// after registering it and may not be modified.
+//
+// data, err := ioutil.ReadFile("mykey.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// block, _ := pem.Decode(data)
+// if block == nil || block.Type != "PUBLIC KEY" {
+// log.Fatal("failed to decode PEM block containing public key")
+// }
+//
+// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
+// mysql.RegisterServerPubKey("mykey", rsaPubKey)
+// } else {
+// log.Fatal("not a RSA public key")
+// }
+func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry == nil {
+ serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
+ }
+
+ serverPubKeyRegistry[name] = pubKey
+ serverPubKeyLock.Unlock()
+}
+
+// DeregisterServerPubKey removes the public key registered with the given name.
+func DeregisterServerPubKey(name string) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry != nil {
+ delete(serverPubKeyRegistry, name)
+ }
+ serverPubKeyLock.Unlock()
+}
+
+func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
+ serverPubKeyLock.RLock()
+ if v, ok := serverPubKeyRegistry[name]; ok {
+ pubKey = v
+ }
+ serverPubKeyLock.RUnlock()
+ return
+}
+
+// Hash password using pre 4.1 (old password) method
+// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
+type myRnd struct {
+ seed1, seed2 uint32
+}
+
+const myRndMaxVal = 0x3FFFFFFF
+
+// Pseudo random number generator
+func newMyRnd(seed1, seed2 uint32) *myRnd {
+ return &myRnd{
+ seed1: seed1 % myRndMaxVal,
+ seed2: seed2 % myRndMaxVal,
+ }
+}
+
+// Tested to be equivalent to MariaDB's floating point variant
+// http://play.golang.org/p/QHvhd4qved
+// http://play.golang.org/p/RG0q4ElWDx
+func (r *myRnd) NextByte() byte {
+ r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
+ r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
+
+ return byte(uint64(r.seed1) * 31 / myRndMaxVal)
+}
+
+// Generate binary hash from byte string using insecure pre 4.1 method
+func pwHash(password []byte) (result [2]uint32) {
+ var add uint32 = 7
+ var tmp uint32
+
+ result[0] = 1345345333
+ result[1] = 0x12345671
+
+ for _, c := range password {
+ // skip spaces and tabs in password
+ if c == ' ' || c == '\t' {
+ continue
+ }
+
+ tmp = uint32(c)
+ result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
+ result[1] += (result[1] << 8) ^ result[0]
+ add += tmp
+ }
+
+ // Remove sign bit (1<<31)-1)
+ result[0] &= 0x7FFFFFFF
+ result[1] &= 0x7FFFFFFF
+
+ return
+}
+
+// Hash password using insecure pre 4.1 method
+func scrambleOldPassword(scramble []byte, password string) []byte {
+ scramble = scramble[:8]
+
+ hashPw := pwHash([]byte(password))
+ hashSc := pwHash(scramble)
+
+ r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
+
+ var out [8]byte
+ for i := range out {
+ out[i] = r.NextByte() + 64
+ }
+
+ mask := r.NextByte()
+ for i := range out {
+ out[i] ^= mask
+ }
+
+ return out[:]
+}
+
+// Hash password using 4.1+ method (SHA1)
+func scramblePassword(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // stage1Hash = SHA1(password)
+ crypt := sha1.New()
+ crypt.Write([]byte(password))
+ stage1 := crypt.Sum(nil)
+
+ // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
+ // inner Hash
+ crypt.Reset()
+ crypt.Write(stage1)
+ hash := crypt.Sum(nil)
+
+ // outer Hash
+ crypt.Reset()
+ crypt.Write(scramble)
+ crypt.Write(hash)
+ scramble = crypt.Sum(nil)
+
+ // token = scrambleHash XOR stage1Hash
+ for i := range scramble {
+ scramble[i] ^= stage1[i]
+ }
+ return scramble
+}
+
+// Hash password using MySQL 8+ method (SHA256)
+func scrambleSHA256Password(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
+
+ crypt := sha256.New()
+ crypt.Write([]byte(password))
+ message1 := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1)
+ message1Hash := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1Hash)
+ crypt.Write(scramble)
+ message2 := crypt.Sum(nil)
+
+ for i := range message1 {
+ message1[i] ^= message2[i]
+ }
+
+ return message1
+}
+
+func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
+ plain := make([]byte, len(password)+1)
+ copy(plain, password)
+ for i := range plain {
+ j := i % len(seed)
+ plain[i] ^= seed[j]
+ }
+ sha1 := sha1.New()
+ return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
+}
+
+func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
+ enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
+ if err != nil {
+ return err
+ }
+ return mc.writeAuthSwitchPacket(enc)
+}
+
+func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
+ switch plugin {
+ case "caching_sha2_password":
+ authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
+ return authResp, nil
+
+ case "mysql_old_password":
+ if !mc.cfg.AllowOldPasswords {
+ return nil, ErrOldPassword
+ }
+ if len(mc.cfg.Passwd) == 0 {
+ return nil, nil
+ }
+ // Note: there are edge cases where this should work but doesn't;
+ // this is currently "wontfix":
+ // https://github.com/go-sql-driver/mysql/issues/184
+ authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
+ return authResp, nil
+
+ case "mysql_clear_password":
+ if !mc.cfg.AllowCleartextPasswords {
+ return nil, ErrCleartextPassword
+ }
+ // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
+ // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
+ return append([]byte(mc.cfg.Passwd), 0), nil
+
+ case "mysql_native_password":
+ if !mc.cfg.AllowNativePasswords {
+ return nil, ErrNativePassword
+ }
+ // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
+ // Native password authentication only need and will need 20-byte challenge.
+ authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
+ return authResp, nil
+
+ case "sha256_password":
+ if len(mc.cfg.Passwd) == 0 {
+ return []byte{0}, nil
+ }
+ // unlike caching_sha2_password, sha256_password does not accept
+ // cleartext password on unix transport.
+ if mc.cfg.TLS != nil {
+ // write cleartext auth packet
+ return append([]byte(mc.cfg.Passwd), 0), nil
+ }
+
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ return []byte{1}, nil
+ }
+
+ // encrypted password
+ enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
+ return enc, err
+
+ default:
+ errLog.Print("unknown auth plugin:", plugin)
+ return nil, ErrUnknownPlugin
+ }
+}
+
+func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
+ // Read Result Packet
+ authData, newPlugin, err := mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // handle auth plugin switch, if requested
+ if newPlugin != "" {
+ // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
+ // sent and we have to keep using the cipher sent in the init packet.
+ if authData == nil {
+ authData = oldAuthData
+ } else {
+ // copy data from read buffer to owned slice
+ copy(oldAuthData, authData)
+ }
+
+ plugin = newPlugin
+
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ return err
+ }
+ if err = mc.writeAuthSwitchPacket(authResp); err != nil {
+ return err
+ }
+
+ // Read Result Packet
+ authData, newPlugin, err = mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // Do not allow to change the auth plugin more than once
+ if newPlugin != "" {
+ return ErrMalformPkt
+ }
+ }
+
+ switch plugin {
+
+ // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+ case "caching_sha2_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ case 1:
+ switch authData[0] {
+ case cachingSha2PasswordFastAuthSuccess:
+ if err = mc.readResultOK(); err == nil {
+ return nil // auth successful
+ }
+
+ case cachingSha2PasswordPerformFullAuthentication:
+ if mc.cfg.TLS != nil || mc.cfg.Net == "unix" {
+ // write cleartext auth packet
+ err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
+ if err != nil {
+ return err
+ }
+ } else {
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ data, err := mc.buf.takeSmallBuffer(4 + 1)
+ if err != nil {
+ return err
+ }
+ data[4] = cachingSha2PasswordRequestPublicKey
+ err = mc.writePacket(data)
+ if err != nil {
+ return err
+ }
+
+ if data, err = mc.readPacket(); err != nil {
+ return err
+ }
+
+ if data[0] != iAuthMoreData {
+ return fmt.Errorf("unexpect resp from server for caching_sha2_password perform full authentication")
+ }
+
+ // parse public key
+ block, rest := pem.Decode(data[1:])
+ if block == nil {
+ return fmt.Errorf("No Pem data found, data: %s", rest)
+ }
+ pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+ pubKey = pkix.(*rsa.PublicKey)
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pubKey)
+ if err != nil {
+ return err
+ }
+ }
+ return mc.readResultOK()
+
+ default:
+ return ErrMalformPkt
+ }
+ default:
+ return ErrMalformPkt
+ }
+
+ case "sha256_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ default:
+ block, _ := pem.Decode(authData)
+ if block == nil {
+ return fmt.Errorf("no Pem data found, data: %s", authData)
+ }
+
+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
+ if err != nil {
+ return err
+ }
+ return mc.readResultOK()
+ }
+
+ default:
+ return nil // auth successful
+ }
+
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
new file mode 100644
index 0000000..0774c5c
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/buffer.go
@@ -0,0 +1,182 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "io"
+ "net"
+ "time"
+)
+
+const defaultBufSize = 4096
+const maxCachedBufSize = 256 * 1024
+
+// A buffer which is used for both reading and writing.
+// This is possible since communication on each connection is synchronous.
+// In other words, we can't write and read simultaneously on the same connection.
+// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
+// Also highly optimized for this particular use case.
+// This buffer is backed by two byte slices in a double-buffering scheme
+type buffer struct {
+ buf []byte // buf is a byte buffer whose length and capacity are equal.
+ nc net.Conn
+ idx int
+ length int
+ timeout time.Duration
+ dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
+ flipcnt uint // flipcnt is the current buffer counter for double-buffering
+}
+
+// newBuffer allocates and returns a new buffer.
+func newBuffer(nc net.Conn) buffer {
+ fg := make([]byte, defaultBufSize)
+ return buffer{
+ buf: fg,
+ nc: nc,
+ dbuf: [2][]byte{fg, nil},
+ }
+}
+
+// flip replaces the active buffer with the background buffer
+// this is a delayed flip that simply increases the buffer counter;
+// the actual flip will be performed the next time we call `buffer.fill`
+func (b *buffer) flip() {
+ b.flipcnt += 1
+}
+
+// fill reads into the buffer until at least _need_ bytes are in it
+func (b *buffer) fill(need int) error {
+ n := b.length
+ // fill data into its double-buffering target: if we've called
+ // flip on this buffer, we'll be copying to the background buffer,
+ // and then filling it with network data; otherwise we'll just move
+ // the contents of the current buffer to the front before filling it
+ dest := b.dbuf[b.flipcnt&1]
+
+ // grow buffer if necessary to fit the whole packet.
+ if need > len(dest) {
+ // Round up to the next multiple of the default size
+ dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
+
+ // if the allocated buffer is not too large, move it to backing storage
+ // to prevent extra allocations on applications that perform large reads
+ if len(dest) <= maxCachedBufSize {
+ b.dbuf[b.flipcnt&1] = dest
+ }
+ }
+
+ // if we're filling the fg buffer, move the existing data to the start of it.
+ // if we're filling the bg buffer, copy over the data
+ if n > 0 {
+ copy(dest[:n], b.buf[b.idx:])
+ }
+
+ b.buf = dest
+ b.idx = 0
+
+ for {
+ if b.timeout > 0 {
+ if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
+ return err
+ }
+ }
+
+ nn, err := b.nc.Read(b.buf[n:])
+ n += nn
+
+ switch err {
+ case nil:
+ if n < need {
+ continue
+ }
+ b.length = n
+ return nil
+
+ case io.EOF:
+ if n >= need {
+ b.length = n
+ return nil
+ }
+ return io.ErrUnexpectedEOF
+
+ default:
+ return err
+ }
+ }
+}
+
+// returns next N bytes from buffer.
+// The returned slice is only guaranteed to be valid until the next read
+func (b *buffer) readNext(need int) ([]byte, error) {
+ if b.length < need {
+ // refill
+ if err := b.fill(need); err != nil {
+ return nil, err
+ }
+ }
+
+ offset := b.idx
+ b.idx += need
+ b.length -= need
+ return b.buf[offset:b.idx], nil
+}
+
+// takeBuffer returns a buffer with the requested size.
+// If possible, a slice from the existing buffer is returned.
+// Otherwise a bigger buffer is made.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeBuffer(length int) ([]byte, error) {
+ if b.length > 0 {
+ return nil, ErrBusyBuffer
+ }
+
+ // test (cheap) general case first
+ if length <= cap(b.buf) {
+ return b.buf[:length], nil
+ }
+
+ if length < maxPacketSize {
+ b.buf = make([]byte, length)
+ return b.buf, nil
+ }
+
+ // buffer is larger than we want to store.
+ return make([]byte, length), nil
+}
+
+// takeSmallBuffer is shortcut which can be used if length is
+// known to be smaller than defaultBufSize.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
+ if b.length > 0 {
+ return nil, ErrBusyBuffer
+ }
+ return b.buf[:length], nil
+}
+
+// takeCompleteBuffer returns the complete existing buffer.
+// This can be used if the necessary buffer size is unknown.
+// cap and len of the returned buffer will be equal.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeCompleteBuffer() ([]byte, error) {
+ if b.length > 0 {
+ return nil, ErrBusyBuffer
+ }
+ return b.buf, nil
+}
+
+// store stores buf, an updated buffer, if it is suitable to do so.
+func (b *buffer) store(buf []byte) error {
+ if b.length > 0 {
+ return ErrBusyBuffer
+ } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
+ b.buf = buf[:cap(buf)]
+ }
+ return nil
+}
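+
+// Illustrative sketch (not part of the driver): the packet writer is expected
+// to borrow the connection buffer, fill it, and hand it back, roughly like:
+//
+//    data, err := mc.buf.takeBuffer(pktLen + 4)
+//    if err != nil {
+//        return err // buffer is busy, e.g. an unread result set is pending
+//    }
+//    copy(data[4:], payload) // 4 header bytes followed by the payload
+//    // ... write data to the wire ...
+//    return mc.buf.store(data) // keep a grown buffer around for reuse
+//
+// pktLen and payload above are placeholders for illustration only.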
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
new file mode 100644
index 0000000..295bfbe
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -0,0 +1,266 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const defaultCollation = "utf8mb4_general_ci"
+const binaryCollation = "binary"
+
+// A list of available collations mapped to the internal ID.
+// To update this map use the following MySQL query:
+//
+// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID
+//
+// The handshake packet has only 1 byte for collation_id, so collations with an ID > 255 cannot be used.
+//
+// ucs2, utf16, and utf32 can't be used for connection charset.
+// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset
+// They are commented out to keep this map small.
+var collations = map[string]byte{
+ "big5_chinese_ci": 1,
+ "latin2_czech_cs": 2,
+ "dec8_swedish_ci": 3,
+ "cp850_general_ci": 4,
+ "latin1_german1_ci": 5,
+ "hp8_english_ci": 6,
+ "koi8r_general_ci": 7,
+ "latin1_swedish_ci": 8,
+ "latin2_general_ci": 9,
+ "swe7_swedish_ci": 10,
+ "ascii_general_ci": 11,
+ "ujis_japanese_ci": 12,
+ "sjis_japanese_ci": 13,
+ "cp1251_bulgarian_ci": 14,
+ "latin1_danish_ci": 15,
+ "hebrew_general_ci": 16,
+ "tis620_thai_ci": 18,
+ "euckr_korean_ci": 19,
+ "latin7_estonian_cs": 20,
+ "latin2_hungarian_ci": 21,
+ "koi8u_general_ci": 22,
+ "cp1251_ukrainian_ci": 23,
+ "gb2312_chinese_ci": 24,
+ "greek_general_ci": 25,
+ "cp1250_general_ci": 26,
+ "latin2_croatian_ci": 27,
+ "gbk_chinese_ci": 28,
+ "cp1257_lithuanian_ci": 29,
+ "latin5_turkish_ci": 30,
+ "latin1_german2_ci": 31,
+ "armscii8_general_ci": 32,
+ "utf8_general_ci": 33,
+ "cp1250_czech_cs": 34,
+ //"ucs2_general_ci": 35,
+ "cp866_general_ci": 36,
+ "keybcs2_general_ci": 37,
+ "macce_general_ci": 38,
+ "macroman_general_ci": 39,
+ "cp852_general_ci": 40,
+ "latin7_general_ci": 41,
+ "latin7_general_cs": 42,
+ "macce_bin": 43,
+ "cp1250_croatian_ci": 44,
+ "utf8mb4_general_ci": 45,
+ "utf8mb4_bin": 46,
+ "latin1_bin": 47,
+ "latin1_general_ci": 48,
+ "latin1_general_cs": 49,
+ "cp1251_bin": 50,
+ "cp1251_general_ci": 51,
+ "cp1251_general_cs": 52,
+ "macroman_bin": 53,
+ //"utf16_general_ci": 54,
+ //"utf16_bin": 55,
+ //"utf16le_general_ci": 56,
+ "cp1256_general_ci": 57,
+ "cp1257_bin": 58,
+ "cp1257_general_ci": 59,
+ //"utf32_general_ci": 60,
+ //"utf32_bin": 61,
+ //"utf16le_bin": 62,
+ "binary": 63,
+ "armscii8_bin": 64,
+ "ascii_bin": 65,
+ "cp1250_bin": 66,
+ "cp1256_bin": 67,
+ "cp866_bin": 68,
+ "dec8_bin": 69,
+ "greek_bin": 70,
+ "hebrew_bin": 71,
+ "hp8_bin": 72,
+ "keybcs2_bin": 73,
+ "koi8r_bin": 74,
+ "koi8u_bin": 75,
+ "utf8_tolower_ci": 76,
+ "latin2_bin": 77,
+ "latin5_bin": 78,
+ "latin7_bin": 79,
+ "cp850_bin": 80,
+ "cp852_bin": 81,
+ "swe7_bin": 82,
+ "utf8_bin": 83,
+ "big5_bin": 84,
+ "euckr_bin": 85,
+ "gb2312_bin": 86,
+ "gbk_bin": 87,
+ "sjis_bin": 88,
+ "tis620_bin": 89,
+ //"ucs2_bin": 90,
+ "ujis_bin": 91,
+ "geostd8_general_ci": 92,
+ "geostd8_bin": 93,
+ "latin1_spanish_ci": 94,
+ "cp932_japanese_ci": 95,
+ "cp932_bin": 96,
+ "eucjpms_japanese_ci": 97,
+ "eucjpms_bin": 98,
+ "cp1250_polish_ci": 99,
+ //"utf16_unicode_ci": 101,
+ //"utf16_icelandic_ci": 102,
+ //"utf16_latvian_ci": 103,
+ //"utf16_romanian_ci": 104,
+ //"utf16_slovenian_ci": 105,
+ //"utf16_polish_ci": 106,
+ //"utf16_estonian_ci": 107,
+ //"utf16_spanish_ci": 108,
+ //"utf16_swedish_ci": 109,
+ //"utf16_turkish_ci": 110,
+ //"utf16_czech_ci": 111,
+ //"utf16_danish_ci": 112,
+ //"utf16_lithuanian_ci": 113,
+ //"utf16_slovak_ci": 114,
+ //"utf16_spanish2_ci": 115,
+ //"utf16_roman_ci": 116,
+ //"utf16_persian_ci": 117,
+ //"utf16_esperanto_ci": 118,
+ //"utf16_hungarian_ci": 119,
+ //"utf16_sinhala_ci": 120,
+ //"utf16_german2_ci": 121,
+ //"utf16_croatian_ci": 122,
+ //"utf16_unicode_520_ci": 123,
+ //"utf16_vietnamese_ci": 124,
+ //"ucs2_unicode_ci": 128,
+ //"ucs2_icelandic_ci": 129,
+ //"ucs2_latvian_ci": 130,
+ //"ucs2_romanian_ci": 131,
+ //"ucs2_slovenian_ci": 132,
+ //"ucs2_polish_ci": 133,
+ //"ucs2_estonian_ci": 134,
+ //"ucs2_spanish_ci": 135,
+ //"ucs2_swedish_ci": 136,
+ //"ucs2_turkish_ci": 137,
+ //"ucs2_czech_ci": 138,
+ //"ucs2_danish_ci": 139,
+ //"ucs2_lithuanian_ci": 140,
+ //"ucs2_slovak_ci": 141,
+ //"ucs2_spanish2_ci": 142,
+ //"ucs2_roman_ci": 143,
+ //"ucs2_persian_ci": 144,
+ //"ucs2_esperanto_ci": 145,
+ //"ucs2_hungarian_ci": 146,
+ //"ucs2_sinhala_ci": 147,
+ //"ucs2_german2_ci": 148,
+ //"ucs2_croatian_ci": 149,
+ //"ucs2_unicode_520_ci": 150,
+ //"ucs2_vietnamese_ci": 151,
+ //"ucs2_general_mysql500_ci": 159,
+ //"utf32_unicode_ci": 160,
+ //"utf32_icelandic_ci": 161,
+ //"utf32_latvian_ci": 162,
+ //"utf32_romanian_ci": 163,
+ //"utf32_slovenian_ci": 164,
+ //"utf32_polish_ci": 165,
+ //"utf32_estonian_ci": 166,
+ //"utf32_spanish_ci": 167,
+ //"utf32_swedish_ci": 168,
+ //"utf32_turkish_ci": 169,
+ //"utf32_czech_ci": 170,
+ //"utf32_danish_ci": 171,
+ //"utf32_lithuanian_ci": 172,
+ //"utf32_slovak_ci": 173,
+ //"utf32_spanish2_ci": 174,
+ //"utf32_roman_ci": 175,
+ //"utf32_persian_ci": 176,
+ //"utf32_esperanto_ci": 177,
+ //"utf32_hungarian_ci": 178,
+ //"utf32_sinhala_ci": 179,
+ //"utf32_german2_ci": 180,
+ //"utf32_croatian_ci": 181,
+ //"utf32_unicode_520_ci": 182,
+ //"utf32_vietnamese_ci": 183,
+ "utf8_unicode_ci": 192,
+ "utf8_icelandic_ci": 193,
+ "utf8_latvian_ci": 194,
+ "utf8_romanian_ci": 195,
+ "utf8_slovenian_ci": 196,
+ "utf8_polish_ci": 197,
+ "utf8_estonian_ci": 198,
+ "utf8_spanish_ci": 199,
+ "utf8_swedish_ci": 200,
+ "utf8_turkish_ci": 201,
+ "utf8_czech_ci": 202,
+ "utf8_danish_ci": 203,
+ "utf8_lithuanian_ci": 204,
+ "utf8_slovak_ci": 205,
+ "utf8_spanish2_ci": 206,
+ "utf8_roman_ci": 207,
+ "utf8_persian_ci": 208,
+ "utf8_esperanto_ci": 209,
+ "utf8_hungarian_ci": 210,
+ "utf8_sinhala_ci": 211,
+ "utf8_german2_ci": 212,
+ "utf8_croatian_ci": 213,
+ "utf8_unicode_520_ci": 214,
+ "utf8_vietnamese_ci": 215,
+ "utf8_general_mysql500_ci": 223,
+ "utf8mb4_unicode_ci": 224,
+ "utf8mb4_icelandic_ci": 225,
+ "utf8mb4_latvian_ci": 226,
+ "utf8mb4_romanian_ci": 227,
+ "utf8mb4_slovenian_ci": 228,
+ "utf8mb4_polish_ci": 229,
+ "utf8mb4_estonian_ci": 230,
+ "utf8mb4_spanish_ci": 231,
+ "utf8mb4_swedish_ci": 232,
+ "utf8mb4_turkish_ci": 233,
+ "utf8mb4_czech_ci": 234,
+ "utf8mb4_danish_ci": 235,
+ "utf8mb4_lithuanian_ci": 236,
+ "utf8mb4_slovak_ci": 237,
+ "utf8mb4_spanish2_ci": 238,
+ "utf8mb4_roman_ci": 239,
+ "utf8mb4_persian_ci": 240,
+ "utf8mb4_esperanto_ci": 241,
+ "utf8mb4_hungarian_ci": 242,
+ "utf8mb4_sinhala_ci": 243,
+ "utf8mb4_german2_ci": 244,
+ "utf8mb4_croatian_ci": 245,
+ "utf8mb4_unicode_520_ci": 246,
+ "utf8mb4_vietnamese_ci": 247,
+ "gb18030_chinese_ci": 248,
+ "gb18030_bin": 249,
+ "gb18030_unicode_520_ci": 250,
+ "utf8mb4_0900_ai_ci": 255,
+}
+
+// A denylist of collations for which it is unsafe to interpolate parameters.
+// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
+var unsafeCollations = map[string]bool{
+ "big5_chinese_ci": true,
+ "sjis_japanese_ci": true,
+ "gbk_chinese_ci": true,
+ "big5_bin": true,
+ "gb2312_bin": true,
+ "gbk_bin": true,
+ "sjis_bin": true,
+ "cp932_japanese_ci": true,
+ "cp932_bin": true,
+ "gb18030_chinese_ci": true,
+ "gb18030_bin": true,
+ "gb18030_unicode_520_ci": true,
+}
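+
+// Illustrative sketch (not part of the driver): a DSN that combines
+// interpolateParams=true with one of the collations above is rejected during
+// normalization, because a trailing 0x5c byte could defeat backslash escaping.
+// The DSN below is a placeholder for illustration:
+//
+//    _, err := ParseDSN("user:pass@tcp(127.0.0.1:3306)/db?interpolateParams=true&collation=big5_chinese_ci")
+//    // err is errInvalidDSNUnsafeCollation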
diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck.go b/vendor/github.com/go-sql-driver/mysql/conncheck.go
new file mode 100644
index 0000000..0ea7217
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/conncheck.go
@@ -0,0 +1,55 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//go:build linux || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || illumos
+// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos
+
+package mysql
+
+import (
+ "errors"
+ "io"
+ "net"
+ "syscall"
+)
+
+var errUnexpectedRead = errors.New("unexpected read from socket")
+
+func connCheck(conn net.Conn) error {
+ var sysErr error
+
+ sysConn, ok := conn.(syscall.Conn)
+ if !ok {
+ return nil
+ }
+ rawConn, err := sysConn.SyscallConn()
+ if err != nil {
+ return err
+ }
+
+ err = rawConn.Read(func(fd uintptr) bool {
+ var buf [1]byte
+ n, err := syscall.Read(int(fd), buf[:])
+ switch {
+ case n == 0 && err == nil:
+ sysErr = io.EOF
+ case n > 0:
+ sysErr = errUnexpectedRead
+ case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
+ sysErr = nil
+ default:
+ sysErr = err
+ }
+ return true
+ })
+ if err != nil {
+ return err
+ }
+
+ return sysErr
+}
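+
+// Illustrative sketch (not part of the driver): connCheck is intended as a
+// cheap liveness probe on an idle connection before it is reused, e.g.:
+//
+//    if err := connCheck(mc.netConn); err != nil {
+//        // the server closed the connection while it sat in the pool;
+//        // report driver.ErrBadConn so database/sql retries on a fresh one
+//    }
+//
+// A non-blocking 1-byte read distinguishes EOF (peer closed), unexpected
+// data, and EAGAIN/EWOULDBLOCK (connection still alive).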
diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
new file mode 100644
index 0000000..a56c138
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
@@ -0,0 +1,18 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//go:build !linux && !darwin && !dragonfly && !freebsd && !netbsd && !openbsd && !solaris && !illumos
+// +build !linux,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!illumos
+
+package mysql
+
+import "net"
+
+func connCheck(conn net.Conn) error {
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
new file mode 100644
index 0000000..947a883
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -0,0 +1,650 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+ "encoding/json"
+ "io"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type mysqlConn struct {
+ buf buffer
+ netConn net.Conn
+ rawConn net.Conn // underlying connection when netConn is TLS connection.
+ affectedRows uint64
+ insertId uint64
+ cfg *Config
+ maxAllowedPacket int
+ maxWriteSize int
+ writeTimeout time.Duration
+ flags clientFlag
+ status statusFlag
+ sequence uint8
+ parseTime bool
+ reset bool // set when the Go SQL package calls ResetSession
+
+ // for context support (Go 1.8+)
+ watching bool
+ watcher chan<- context.Context
+ closech chan struct{}
+ finished chan<- struct{}
+ canceled atomicError // set non-nil if conn is canceled
+ closed atomicBool // set when conn is closed, before closech is closed
+}
+
+// Handles parameters set in DSN after the connection is established
+func (mc *mysqlConn) handleParams() (err error) {
+ var cmdSet strings.Builder
+ for param, val := range mc.cfg.Params {
+ switch param {
+ // Charset: character_set_connection, character_set_client, character_set_results
+ case "charset":
+ charsets := strings.Split(val, ",")
+ for i := range charsets {
+ // ignore errors here - a charset may not exist
+ err = mc.exec("SET NAMES " + charsets[i])
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ return
+ }
+
+ // Other system vars accumulated in a single SET command
+ default:
+ if cmdSet.Len() == 0 {
+ // Heuristic: reserve ~30 chars for each additional key=value pair to reduce reallocations
+ cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1))
+ cmdSet.WriteString("SET ")
+ } else {
+ cmdSet.WriteString(", ")
+ }
+ cmdSet.WriteString(param)
+ cmdSet.WriteString(" = ")
+ cmdSet.WriteString(val)
+ }
+ }
+
+ if cmdSet.Len() > 0 {
+ err = mc.exec(cmdSet.String())
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
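+
+// Illustrative sketch (not part of the driver): DSN parameters that the DSN
+// parser does not recognize end up in cfg.Params and are applied by
+// handleParams above. For example, a DSN like
+//
+//    user:pass@tcp(127.0.0.1:3306)/db?charset=utf8mb4,utf8&sql_mode=%27ANSI_QUOTES%27
+//
+// issues "SET NAMES utf8mb4" (falling back to utf8 if that charset is
+// rejected) followed by "SET sql_mode = 'ANSI_QUOTES'". The values are
+// placeholders for illustration.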
+
+func (mc *mysqlConn) markBadConn(err error) error {
+ if mc == nil {
+ return err
+ }
+ if err != errBadConnNoWrite {
+ return err
+ }
+ return driver.ErrBadConn
+}
+
+func (mc *mysqlConn) Begin() (driver.Tx, error) {
+ return mc.begin(false)
+}
+
+func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
+ if mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ var q string
+ if readOnly {
+ q = "START TRANSACTION READ ONLY"
+ } else {
+ q = "START TRANSACTION"
+ }
+ err := mc.exec(q)
+ if err == nil {
+ return &mysqlTx{mc}, err
+ }
+ return nil, mc.markBadConn(err)
+}
+
+func (mc *mysqlConn) Close() (err error) {
+ // Makes Close idempotent
+ if !mc.closed.Load() {
+ err = mc.writeCommandPacket(comQuit)
+ }
+
+ mc.cleanup()
+
+ return
+}
+
+// Closes the network connection and unsets internal variables. Do not call this
+// function after successful authentication; call Close instead. This function
+// is called before auth or on auth failure because MySQL will have already
+// closed the network connection.
+func (mc *mysqlConn) cleanup() {
+ if mc.closed.Swap(true) {
+ return
+ }
+
+ // The Swap above guarantees this runs only once.
+ close(mc.closech)
+ if mc.netConn == nil {
+ return
+ }
+ if err := mc.netConn.Close(); err != nil {
+ errLog.Print(err)
+ }
+}
+
+func (mc *mysqlConn) error() error {
+ if mc.closed.Load() {
+ if err := mc.canceled.Value(); err != nil {
+ return err
+ }
+ return ErrInvalidConn
+ }
+ return nil
+}
+
+func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
+ if mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comStmtPrepare, query)
+ if err != nil {
+ // STMT_PREPARE is safe to retry. So we can return ErrBadConn here.
+ errLog.Print(err)
+ return nil, driver.ErrBadConn
+ }
+
+ stmt := &mysqlStmt{
+ mc: mc,
+ }
+
+ // Read Result
+ columnCount, err := stmt.readPrepareResultPacket()
+ if err == nil {
+ if stmt.paramCount > 0 {
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if columnCount > 0 {
+ err = mc.readUntilEOF()
+ }
+ }
+
+ return stmt, err
+}
+
+func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
+ // Number of ? should be same to len(args)
+ if strings.Count(query, "?") != len(args) {
+ return "", driver.ErrSkip
+ }
+
+ buf, err := mc.buf.takeCompleteBuffer()
+ if err != nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return "", ErrInvalidConn
+ }
+ buf = buf[:0]
+ argPos := 0
+
+ for i := 0; i < len(query); i++ {
+ q := strings.IndexByte(query[i:], '?')
+ if q == -1 {
+ buf = append(buf, query[i:]...)
+ break
+ }
+ buf = append(buf, query[i:i+q]...)
+ i += q
+
+ arg := args[argPos]
+ argPos++
+
+ if arg == nil {
+ buf = append(buf, "NULL"...)
+ continue
+ }
+
+ switch v := arg.(type) {
+ case int64:
+ buf = strconv.AppendInt(buf, v, 10)
+ case uint64:
+ // Handle uint64 explicitly because our custom ConvertValue emits unsigned values
+ buf = strconv.AppendUint(buf, v, 10)
+ case float64:
+ buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
+ case bool:
+ if v {
+ buf = append(buf, '1')
+ } else {
+ buf = append(buf, '0')
+ }
+ case time.Time:
+ if v.IsZero() {
+ buf = append(buf, "'0000-00-00'"...)
+ } else {
+ buf = append(buf, '\'')
+ buf, err = appendDateTime(buf, v.In(mc.cfg.Loc))
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, '\'')
+ }
+ case json.RawMessage:
+ buf = append(buf, '\'')
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeBytesBackslash(buf, v)
+ } else {
+ buf = escapeBytesQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ case []byte:
+ if v == nil {
+ buf = append(buf, "NULL"...)
+ } else {
+ buf = append(buf, "_binary'"...)
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeBytesBackslash(buf, v)
+ } else {
+ buf = escapeBytesQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ }
+ case string:
+ buf = append(buf, '\'')
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeStringBackslash(buf, v)
+ } else {
+ buf = escapeStringQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ default:
+ return "", driver.ErrSkip
+ }
+
+ if len(buf)+4 > mc.maxAllowedPacket {
+ return "", driver.ErrSkip
+ }
+ }
+ if argPos != len(args) {
+ return "", driver.ErrSkip
+ }
+ return string(buf), nil
+}
+
+func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
+ if mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ }
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ err := mc.exec(query)
+ if err == nil {
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, err
+ }
+ return nil, mc.markBadConn(err)
+}
+
+// Internal function to execute commands
+func (mc *mysqlConn) exec(query string) error {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
+ return mc.markBadConn(err)
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ }
+
+ return mc.discardResults()
+}
+
+func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+ return mc.query(query, args)
+}
+
+func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ if mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try client-side prepare to reduce roundtrips
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comQuery, query)
+ if err == nil {
+ // Read Result
+ var resLen int
+ resLen, err = mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+
+ if resLen == 0 {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
+ }
+
+ // Columns
+ rows.rs.columns, err = mc.readColumns(resLen)
+ return rows, err
+ }
+ }
+ return nil, mc.markBadConn(err)
+}
+
+// Gets the value of the given MySQL System Variable
+// The returned byte slice is only valid until the next read
+func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
+ return nil, err
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+ rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
+
+ if resLen > 0 {
+ // Columns
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ dest := make([]driver.Value, resLen)
+ if err = rows.readRow(dest); err == nil {
+ return dest[0].([]byte), mc.readUntilEOF()
+ }
+ }
+ return nil, err
+}
+
+// cancel is called when the query is canceled.
+func (mc *mysqlConn) cancel(err error) {
+ mc.canceled.Set(err)
+ mc.cleanup()
+}
+
+// finish is called when the query has succeeded.
+func (mc *mysqlConn) finish() {
+ if !mc.watching || mc.finished == nil {
+ return
+ }
+ select {
+ case mc.finished <- struct{}{}:
+ mc.watching = false
+ case <-mc.closech:
+ }
+}
+
+// Ping implements driver.Pinger interface
+func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
+ if mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ if err = mc.watchCancel(ctx); err != nil {
+ return
+ }
+ defer mc.finish()
+
+ if err = mc.writeCommandPacket(comPing); err != nil {
+ return mc.markBadConn(err)
+ }
+
+ return mc.readResultOK()
+}
+
+// BeginTx implements driver.ConnBeginTx interface
+func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if mc.closed.Load() {
+ return nil, driver.ErrBadConn
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
+ level, err := mapIsolationLevel(opts.Isolation)
+ if err != nil {
+ return nil, err
+ }
+ err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return mc.begin(opts.ReadOnly)
+}
+
+func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := mc.query(query, dargs)
+ if err != nil {
+ mc.finish()
+ return nil, err
+ }
+ rows.finish = mc.finish
+ return rows, err
+}
+
+func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ return mc.Exec(query, dargs)
+}
+
+func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ stmt, err := mc.Prepare(query)
+ mc.finish()
+ if err != nil {
+ return nil, err
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ stmt.Close()
+ return nil, ctx.Err()
+ }
+ return stmt, nil
+}
+
+func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := stmt.query(dargs)
+ if err != nil {
+ stmt.mc.finish()
+ return nil, err
+ }
+ rows.finish = stmt.mc.finish
+ return rows, err
+}
+
+func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer stmt.mc.finish()
+
+ return stmt.Exec(dargs)
+}
+
+func (mc *mysqlConn) watchCancel(ctx context.Context) error {
+ if mc.watching {
+ // We only reach here if the context was canceled,
+ // so the connection is already invalid
+ mc.cleanup()
+ return nil
+ }
+ // When ctx is already cancelled, don't watch it.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ // When ctx is not cancellable, don't watch it.
+ if ctx.Done() == nil {
+ return nil
+ }
+ // When watcher is not alive, can't watch it.
+ if mc.watcher == nil {
+ return nil
+ }
+
+ mc.watching = true
+ mc.watcher <- ctx
+ return nil
+}
+
+func (mc *mysqlConn) startWatcher() {
+ watcher := make(chan context.Context, 1)
+ mc.watcher = watcher
+ finished := make(chan struct{})
+ mc.finished = finished
+ go func() {
+ for {
+ var ctx context.Context
+ select {
+ case ctx = <-watcher:
+ case <-mc.closech:
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ mc.cancel(ctx.Err())
+ case <-finished:
+ case <-mc.closech:
+ return
+ }
+ }
+ }()
+}
+
+func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
+
+// ResetSession implements driver.SessionResetter.
+// (From Go 1.10)
+func (mc *mysqlConn) ResetSession(ctx context.Context) error {
+ if mc.closed.Load() {
+ return driver.ErrBadConn
+ }
+ mc.reset = true
+ return nil
+}
+
+// IsValid implements driver.Validator interface
+// (From Go 1.15)
+func (mc *mysqlConn) IsValid() bool {
+ return !mc.closed.Load()
+}
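+
+// Illustrative sketch (not part of the driver): the context plumbing above
+// (watchCancel / startWatcher / finish) is what makes deadlines on the
+// standard database/sql context methods effective, e.g.:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+//    defer cancel()
+//    rows, err := db.QueryContext(ctx, "SELECT SLEEP(10)")
+//    // err will normally be context.DeadlineExceeded and the connection is discarded
+//
+// db above is a *sql.DB opened with this driver; the query and timeout are
+// placeholders for illustration.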
diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go
new file mode 100644
index 0000000..d567b4e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connector.go
@@ -0,0 +1,146 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "context"
+ "database/sql/driver"
+ "net"
+)
+
+type connector struct {
+ cfg *Config // immutable private copy.
+}
+
+// Connect implements driver.Connector interface.
+// Connect returns a connection to the database.
+func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
+ var err error
+
+ // New mysqlConn
+ mc := &mysqlConn{
+ maxAllowedPacket: maxPacketSize,
+ maxWriteSize: maxPacketSize - 1,
+ closech: make(chan struct{}),
+ cfg: c.cfg,
+ }
+ mc.parseTime = mc.cfg.ParseTime
+
+ // Connect to Server
+ dialsLock.RLock()
+ dial, ok := dials[mc.cfg.Net]
+ dialsLock.RUnlock()
+ if ok {
+ dctx := ctx
+ if mc.cfg.Timeout > 0 {
+ var cancel context.CancelFunc
+ dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout)
+ defer cancel()
+ }
+ mc.netConn, err = dial(dctx, mc.cfg.Addr)
+ } else {
+ nd := net.Dialer{Timeout: mc.cfg.Timeout}
+ mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Enable TCP Keepalives on TCP connections
+ if tc, ok := mc.netConn.(*net.TCPConn); ok {
+ if err := tc.SetKeepAlive(true); err != nil {
+ // Don't send COM_QUIT before handshake.
+ mc.netConn.Close()
+ mc.netConn = nil
+ return nil, err
+ }
+ }
+
+ // Call startWatcher for context support (From Go 1.8)
+ mc.startWatcher()
+ if err := mc.watchCancel(ctx); err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+ defer mc.finish()
+
+ mc.buf = newBuffer(mc.netConn)
+
+ // Set I/O timeouts
+ mc.buf.timeout = mc.cfg.ReadTimeout
+ mc.writeTimeout = mc.cfg.WriteTimeout
+
+ // Reading Handshake Initialization Packet
+ authData, plugin, err := mc.readHandshakePacket()
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ if plugin == "" {
+ plugin = defaultAuthPlugin
+ }
+
+ // Send Client Authentication Packet
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ // try the default auth plugin if using the requested plugin failed
+ errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+ plugin = defaultAuthPlugin
+ authResp, err = mc.auth(authData, plugin)
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+ }
+ if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ // Handle response to auth packet, switch methods if possible
+ if err = mc.handleAuthResult(authData, plugin); err != nil {
+ // Authentication failed and MySQL has already closed the connection
+ // (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
+ // Do not send COM_QUIT, just cleanup and return the error.
+ mc.cleanup()
+ return nil, err
+ }
+
+ if mc.cfg.MaxAllowedPacket > 0 {
+ mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
+ } else {
+ // Get max allowed packet size
+ maxap, err := mc.getSystemVar("max_allowed_packet")
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+ mc.maxAllowedPacket = stringToInt(maxap) - 1
+ }
+ if mc.maxAllowedPacket < maxPacketSize {
+ mc.maxWriteSize = mc.maxAllowedPacket
+ }
+
+ // Handle DSN Params
+ err = mc.handleParams()
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+
+ return mc, nil
+}
+
+// Driver implements driver.Connector interface.
+// Driver returns &MySQLDriver{}.
+func (c *connector) Driver() driver.Driver {
+ return &MySQLDriver{}
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
new file mode 100644
index 0000000..64e2bce
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -0,0 +1,174 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const (
+ defaultAuthPlugin = "mysql_native_password"
+ defaultMaxAllowedPacket = 64 << 20 // 64 MiB. See https://github.com/go-sql-driver/mysql/issues/1355
+ minProtocolVersion = 10
+ maxPacketSize = 1<<24 - 1
+ timeFormat = "2006-01-02 15:04:05.999999"
+)
+
+// MySQL constants documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+const (
+ iOK byte = 0x00
+ iAuthMoreData byte = 0x01
+ iLocalInFile byte = 0xfb
+ iEOF byte = 0xfe
+ iERR byte = 0xff
+)
+
+// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
+type clientFlag uint32
+
+const (
+ clientLongPassword clientFlag = 1 << iota
+ clientFoundRows
+ clientLongFlag
+ clientConnectWithDB
+ clientNoSchema
+ clientCompress
+ clientODBC
+ clientLocalFiles
+ clientIgnoreSpace
+ clientProtocol41
+ clientInteractive
+ clientSSL
+ clientIgnoreSIGPIPE
+ clientTransactions
+ clientReserved
+ clientSecureConn
+ clientMultiStatements
+ clientMultiResults
+ clientPSMultiResults
+ clientPluginAuth
+ clientConnectAttrs
+ clientPluginAuthLenEncClientData
+ clientCanHandleExpiredPasswords
+ clientSessionTrack
+ clientDeprecateEOF
+)
+
+const (
+ comQuit byte = iota + 1
+ comInitDB
+ comQuery
+ comFieldList
+ comCreateDB
+ comDropDB
+ comRefresh
+ comShutdown
+ comStatistics
+ comProcessInfo
+ comConnect
+ comProcessKill
+ comDebug
+ comPing
+ comTime
+ comDelayedInsert
+ comChangeUser
+ comBinlogDump
+ comTableDump
+ comConnectOut
+ comRegisterSlave
+ comStmtPrepare
+ comStmtExecute
+ comStmtSendLongData
+ comStmtClose
+ comStmtReset
+ comSetOption
+ comStmtFetch
+)
+
+// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
+type fieldType byte
+
+const (
+ fieldTypeDecimal fieldType = iota
+ fieldTypeTiny
+ fieldTypeShort
+ fieldTypeLong
+ fieldTypeFloat
+ fieldTypeDouble
+ fieldTypeNULL
+ fieldTypeTimestamp
+ fieldTypeLongLong
+ fieldTypeInt24
+ fieldTypeDate
+ fieldTypeTime
+ fieldTypeDateTime
+ fieldTypeYear
+ fieldTypeNewDate
+ fieldTypeVarChar
+ fieldTypeBit
+)
+const (
+ fieldTypeJSON fieldType = iota + 0xf5
+ fieldTypeNewDecimal
+ fieldTypeEnum
+ fieldTypeSet
+ fieldTypeTinyBLOB
+ fieldTypeMediumBLOB
+ fieldTypeLongBLOB
+ fieldTypeBLOB
+ fieldTypeVarString
+ fieldTypeString
+ fieldTypeGeometry
+)
+
+type fieldFlag uint16
+
+const (
+ flagNotNULL fieldFlag = 1 << iota
+ flagPriKey
+ flagUniqueKey
+ flagMultipleKey
+ flagBLOB
+ flagUnsigned
+ flagZeroFill
+ flagBinary
+ flagEnum
+ flagAutoIncrement
+ flagTimestamp
+ flagSet
+ flagUnknown1
+ flagUnknown2
+ flagUnknown3
+ flagUnknown4
+)
+
+// http://dev.mysql.com/doc/internals/en/status-flags.html
+type statusFlag uint16
+
+const (
+ statusInTrans statusFlag = 1 << iota
+ statusInAutocommit
+ statusReserved // Not in documentation
+ statusMoreResultsExists
+ statusNoGoodIndexUsed
+ statusNoIndexUsed
+ statusCursorExists
+ statusLastRowSent
+ statusDbDropped
+ statusNoBackslashEscapes
+ statusMetadataChanged
+ statusQueryWasSlow
+ statusPsOutParams
+ statusInTransReadonly
+ statusSessionStateChanged
+)
+
+const (
+ cachingSha2PasswordRequestPublicKey = 2
+ cachingSha2PasswordFastAuthSuccess = 3
+ cachingSha2PasswordPerformFullAuthentication = 4
+)
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
new file mode 100644
index 0000000..ad7aec2
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -0,0 +1,107 @@
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// Package mysql provides a MySQL driver for Go's database/sql package.
+//
+// The driver should be used via the database/sql package:
+//
+// import "database/sql"
+// import _ "github.com/go-sql-driver/mysql"
+//
+// db, err := sql.Open("mysql", "user:password@/dbname")
+//
+// See https://github.com/go-sql-driver/mysql#usage for details
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+ "net"
+ "sync"
+)
+
+// MySQLDriver is exported to make the driver directly accessible.
+// In general the driver is used via the database/sql package.
+type MySQLDriver struct{}
+
+// DialFunc is a function which can be used to establish the network connection.
+// Custom dial functions must be registered with RegisterDial
+//
+// Deprecated: users should register a DialContextFunc instead
+type DialFunc func(addr string) (net.Conn, error)
+
+// DialContextFunc is a function which can be used to establish the network connection.
+// Custom dial functions must be registered with RegisterDialContext
+type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error)
+
+var (
+ dialsLock sync.RWMutex
+ dials map[string]DialContextFunc
+)
+
+// RegisterDialContext registers a custom dial function. It can then be used by the
+// network address mynet(addr), where mynet is the registered new network.
+// The current context for the connection and its address is passed to the dial function.
+func RegisterDialContext(net string, dial DialContextFunc) {
+ dialsLock.Lock()
+ defer dialsLock.Unlock()
+ if dials == nil {
+ dials = make(map[string]DialContextFunc)
+ }
+ dials[net] = dial
+}
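+
+// Illustrative sketch (not part of the driver), written from the caller's
+// perspective: registering a custom network and addressing it from the DSN.
+// The network name "mynet" and the dialer are placeholders for illustration.
+//
+//    mysql.RegisterDialContext("mynet", func(ctx context.Context, addr string) (net.Conn, error) {
+//        var d net.Dialer
+//        return d.DialContext(ctx, "tcp", addr)
+//    })
+//    db, err := sql.Open("mysql", "user:pass@mynet(10.0.0.5:3306)/db")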
+
+// RegisterDial registers a custom dial function. It can then be used by the
+// network address mynet(addr), where mynet is the registered new network.
+// addr is passed as a parameter to the dial function.
+//
+// Deprecated: users should call RegisterDialContext instead
+func RegisterDial(network string, dial DialFunc) {
+ RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) {
+ return dial(addr)
+ })
+}
+
+// Open new Connection.
+// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
+// the DSN string is formatted
+func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
+ cfg, err := ParseDSN(dsn)
+ if err != nil {
+ return nil, err
+ }
+ c := &connector{
+ cfg: cfg,
+ }
+ return c.Connect(context.Background())
+}
+
+func init() {
+ sql.Register("mysql", &MySQLDriver{})
+}
+
+// NewConnector returns new driver.Connector.
+func NewConnector(cfg *Config) (driver.Connector, error) {
+ cfg = cfg.Clone()
+ // normalize the contents of cfg so calls to NewConnector have the same
+ // behavior as MySQLDriver.OpenConnector
+ if err := cfg.normalize(); err != nil {
+ return nil, err
+ }
+ return &connector{cfg: cfg}, nil
+}
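+
+// Illustrative sketch (not part of the driver): building a Config in code and
+// opening a pool without going through a DSN string. The credentials and
+// database name are placeholders for illustration.
+//
+//    cfg := mysql.NewConfig()
+//    cfg.User = "user"
+//    cfg.Passwd = "secret"
+//    cfg.Net = "tcp"
+//    cfg.Addr = "127.0.0.1:3306"
+//    cfg.DBName = "app"
+//    cfg.ParseTime = true
+//    conn, err := mysql.NewConnector(cfg)
+//    if err != nil { /* handle */ }
+//    db := sql.OpenDB(conn)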
+
+// OpenConnector implements driver.DriverContext.
+func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
+ cfg, err := ParseDSN(dsn)
+ if err != nil {
+ return nil, err
+ }
+ return &connector{
+ cfg: cfg,
+ }, nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
new file mode 100644
index 0000000..4b71aaa
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -0,0 +1,577 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "math/big"
+ "net"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
+ errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
+ errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
+ errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
+)
+
+// Config is a configuration parsed from a DSN string.
+// If a new Config is created instead of being parsed from a DSN string,
+// the NewConfig function should be used, which sets default values.
+type Config struct {
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network type
+ Addr string // Network address (requires Net)
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ Collation string // Connection collation
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ ServerPubKey string // Server public key name
+ pubKey *rsa.PublicKey // Server public key
+ TLSConfig string // TLS configuration name
+ TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+
+ AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
+ AllowCleartextPasswords bool // Allows the cleartext client side plugin
+ AllowFallbackToPlaintext bool // Allows fallback to unencrypted connection if server does not support TLS
+ AllowNativePasswords bool // Allows the native password authentication method
+ AllowOldPasswords bool // Allows the old insecure password method
+ CheckConnLiveness bool // Check connections for liveness before using them
+ ClientFoundRows bool // Return number of matching rows instead of rows changed
+ ColumnsWithAlias bool // Prepend table alias to column names
+ InterpolateParams bool // Interpolate placeholders into query string
+ MultiStatements bool // Allow multiple statements in one query
+ ParseTime bool // Parse time values to time.Time
+ RejectReadOnly bool // Reject read-only connections
+}
+
+// NewConfig creates a new Config and sets default values.
+func NewConfig() *Config {
+ return &Config{
+ Collation: defaultCollation,
+ Loc: time.UTC,
+ MaxAllowedPacket: defaultMaxAllowedPacket,
+ AllowNativePasswords: true,
+ CheckConnLiveness: true,
+ }
+}
+
+func (cfg *Config) Clone() *Config {
+ cp := *cfg
+ if cp.TLS != nil {
+ cp.TLS = cfg.TLS.Clone()
+ }
+ if len(cp.Params) > 0 {
+ cp.Params = make(map[string]string, len(cfg.Params))
+ for k, v := range cfg.Params {
+ cp.Params[k] = v
+ }
+ }
+ if cfg.pubKey != nil {
+ cp.pubKey = &rsa.PublicKey{
+ N: new(big.Int).Set(cfg.pubKey.N),
+ E: cfg.pubKey.E,
+ }
+ }
+ return &cp
+}
+
+func (cfg *Config) normalize() error {
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+ } else if cfg.Net == "tcp" {
+ cfg.Addr = ensureHavePort(cfg.Addr)
+ }
+
+ if cfg.TLS == nil {
+ switch cfg.TLSConfig {
+ case "false", "":
+ // don't set anything
+ case "true":
+ cfg.TLS = &tls.Config{}
+ case "skip-verify":
+ cfg.TLS = &tls.Config{InsecureSkipVerify: true}
+ case "preferred":
+ cfg.TLS = &tls.Config{InsecureSkipVerify: true}
+ cfg.AllowFallbackToPlaintext = true
+ default:
+ cfg.TLS = getTLSConfigClone(cfg.TLSConfig)
+ if cfg.TLS == nil {
+ return errors.New("invalid value / unknown config name: " + cfg.TLSConfig)
+ }
+ }
+ }
+
+ if cfg.TLS != nil && cfg.TLS.ServerName == "" && !cfg.TLS.InsecureSkipVerify {
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ cfg.TLS.ServerName = host
+ }
+ }
+
+ if cfg.ServerPubKey != "" {
+ cfg.pubKey = getServerPubKey(cfg.ServerPubKey)
+ if cfg.pubKey == nil {
+ return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey)
+ }
+ }
+
+ return nil
+}
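+
+// Illustrative sketch (not part of the driver): the tls DSN option selects one
+// of the cases above. "true" verifies the server certificate, "skip-verify"
+// and "preferred" do not, and any other name refers to a *tls.Config
+// registered elsewhere in this package via RegisterTLSConfig. The DSN below is
+// a placeholder for illustration:
+//
+//    db, err := sql.Open("mysql", "user:pass@tcp(db.example.com:3306)/app?tls=preferred")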
+
+func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) {
+ buf.Grow(1 + len(name) + 1 + len(value))
+ if !*hasParam {
+ *hasParam = true
+ buf.WriteByte('?')
+ } else {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(name)
+ buf.WriteByte('=')
+ buf.WriteString(value)
+}
+
+// FormatDSN formats the given Config into a DSN string which can be passed to
+// the driver.
+func (cfg *Config) FormatDSN() string {
+ var buf bytes.Buffer
+
+ // [username[:password]@]
+ if len(cfg.User) > 0 {
+ buf.WriteString(cfg.User)
+ if len(cfg.Passwd) > 0 {
+ buf.WriteByte(':')
+ buf.WriteString(cfg.Passwd)
+ }
+ buf.WriteByte('@')
+ }
+
+ // [protocol[(address)]]
+ if len(cfg.Net) > 0 {
+ buf.WriteString(cfg.Net)
+ if len(cfg.Addr) > 0 {
+ buf.WriteByte('(')
+ buf.WriteString(cfg.Addr)
+ buf.WriteByte(')')
+ }
+ }
+
+ // /dbname
+ buf.WriteByte('/')
+ buf.WriteString(cfg.DBName)
+
+ // [?param1=value1&...¶mN=valueN]
+ hasParam := false
+
+ if cfg.AllowAllFiles {
+ hasParam = true
+ buf.WriteString("?allowAllFiles=true")
+ }
+
+ if cfg.AllowCleartextPasswords {
+ writeDSNParam(&buf, &hasParam, "allowCleartextPasswords", "true")
+ }
+
+ if cfg.AllowFallbackToPlaintext {
+ writeDSNParam(&buf, &hasParam, "allowFallbackToPlaintext", "true")
+ }
+
+ if !cfg.AllowNativePasswords {
+ writeDSNParam(&buf, &hasParam, "allowNativePasswords", "false")
+ }
+
+ if cfg.AllowOldPasswords {
+ writeDSNParam(&buf, &hasParam, "allowOldPasswords", "true")
+ }
+
+ if !cfg.CheckConnLiveness {
+ writeDSNParam(&buf, &hasParam, "checkConnLiveness", "false")
+ }
+
+ if cfg.ClientFoundRows {
+ writeDSNParam(&buf, &hasParam, "clientFoundRows", "true")
+ }
+
+ if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ writeDSNParam(&buf, &hasParam, "collation", col)
+ }
+
+ if cfg.ColumnsWithAlias {
+ writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true")
+ }
+
+ if cfg.InterpolateParams {
+ writeDSNParam(&buf, &hasParam, "interpolateParams", "true")
+ }
+
+ if cfg.Loc != time.UTC && cfg.Loc != nil {
+ writeDSNParam(&buf, &hasParam, "loc", url.QueryEscape(cfg.Loc.String()))
+ }
+
+ if cfg.MultiStatements {
+ writeDSNParam(&buf, &hasParam, "multiStatements", "true")
+ }
+
+ if cfg.ParseTime {
+ writeDSNParam(&buf, &hasParam, "parseTime", "true")
+ }
+
+ if cfg.ReadTimeout > 0 {
+ writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String())
+ }
+
+ if cfg.RejectReadOnly {
+ writeDSNParam(&buf, &hasParam, "rejectReadOnly", "true")
+ }
+
+ if len(cfg.ServerPubKey) > 0 {
+ writeDSNParam(&buf, &hasParam, "serverPubKey", url.QueryEscape(cfg.ServerPubKey))
+ }
+
+ if cfg.Timeout > 0 {
+ writeDSNParam(&buf, &hasParam, "timeout", cfg.Timeout.String())
+ }
+
+ if len(cfg.TLSConfig) > 0 {
+ writeDSNParam(&buf, &hasParam, "tls", url.QueryEscape(cfg.TLSConfig))
+ }
+
+ if cfg.WriteTimeout > 0 {
+ writeDSNParam(&buf, &hasParam, "writeTimeout", cfg.WriteTimeout.String())
+ }
+
+ if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
+ writeDSNParam(&buf, &hasParam, "maxAllowedPacket", strconv.Itoa(cfg.MaxAllowedPacket))
+ }
+
+ // other params
+ if cfg.Params != nil {
+ var params []string
+ for param := range cfg.Params {
+ params = append(params, param)
+ }
+ sort.Strings(params)
+ for _, param := range params {
+ writeDSNParam(&buf, &hasParam, param, url.QueryEscape(cfg.Params[param]))
+ }
+ }
+
+ return buf.String()
+}
+
+// ParseDSN parses the DSN string to a Config
+func ParseDSN(dsn string) (cfg *Config, err error) {
+ // New config with some default values
+ cfg = NewConfig()
+
+ // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
+ // Find the last '/' (since the password or the net addr might contain a '/')
+ foundSlash := false
+ for i := len(dsn) - 1; i >= 0; i-- {
+ if dsn[i] == '/' {
+ foundSlash = true
+ var j, k int
+
+ // left part is empty if i <= 0
+ if i > 0 {
+ // [username[:password]@][protocol[(address)]]
+ // Find the last '@' in dsn[:i]
+ for j = i; j >= 0; j-- {
+ if dsn[j] == '@' {
+ // username[:password]
+ // Find the first ':' in dsn[:j]
+ for k = 0; k < j; k++ {
+ if dsn[k] == ':' {
+ cfg.Passwd = dsn[k+1 : j]
+ break
+ }
+ }
+ cfg.User = dsn[:k]
+
+ break
+ }
+ }
+
+ // [protocol[(address)]]
+ // Find the first '(' in dsn[j+1:i]
+ for k = j + 1; k < i; k++ {
+ if dsn[k] == '(' {
+ // dsn[i-1] must be == ')' if an address is specified
+ if dsn[i-1] != ')' {
+ if strings.ContainsRune(dsn[k+1:i], ')') {
+ return nil, errInvalidDSNUnescaped
+ }
+ return nil, errInvalidDSNAddr
+ }
+ cfg.Addr = dsn[k+1 : i-1]
+ break
+ }
+ }
+ cfg.Net = dsn[j+1 : k]
+ }
+
+ // dbname[?param1=value1&...¶mN=valueN]
+ // Find the first '?' in dsn[i+1:]
+ for j = i + 1; j < len(dsn); j++ {
+ if dsn[j] == '?' {
+ if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
+ return
+ }
+ break
+ }
+ }
+ cfg.DBName = dsn[i+1 : j]
+
+ break
+ }
+ }
+
+ if !foundSlash && len(dsn) > 0 {
+ return nil, errInvalidDSNNoSlash
+ }
+
+ if err = cfg.normalize(); err != nil {
+ return nil, err
+ }
+ return
+}
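+
+// Illustrative sketch (not part of the driver): parsing a DSN, tweaking it,
+// and formatting it back. The DSN values are placeholders for illustration.
+//
+//    cfg, err := mysql.ParseDSN("user:pass@tcp(db.example.com)/app?parseTime=true")
+//    if err != nil { /* handle */ }
+//    // cfg.Addr == "db.example.com:3306" (normalize added the default port)
+//    cfg.ReadTimeout = 30 * time.Second
+//    dsn := cfg.FormatDSN()
+//    // roughly: "user:pass@tcp(db.example.com:3306)/app?parseTime=true&readTimeout=30s"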
+
+// parseDSNParams parses the DSN "query string"
+// Values must be url.QueryEscape'ed
+func parseDSNParams(cfg *Config, params string) (err error) {
+ for _, v := range strings.Split(params, "&") {
+ param := strings.SplitN(v, "=", 2)
+ if len(param) != 2 {
+ continue
+ }
+
+ // cfg params
+ switch value := param[1]; param[0] {
+ // Disable INFILE allowlist / enable all files
+ case "allowAllFiles":
+ var isBool bool
+ cfg.AllowAllFiles, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use cleartext authentication mode (MySQL 5.5.10+)
+ case "allowCleartextPasswords":
+ var isBool bool
+ cfg.AllowCleartextPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Allow fallback to unencrypted connection if server does not support TLS
+ case "allowFallbackToPlaintext":
+ var isBool bool
+ cfg.AllowFallbackToPlaintext, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use native password authentication
+ case "allowNativePasswords":
+ var isBool bool
+ cfg.AllowNativePasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use old authentication mode (pre MySQL 4.1)
+ case "allowOldPasswords":
+ var isBool bool
+ cfg.AllowOldPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Check connections for Liveness before using them
+ case "checkConnLiveness":
+ var isBool bool
+ cfg.CheckConnLiveness, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Switch "rowsAffected" mode
+ case "clientFoundRows":
+ var isBool bool
+ cfg.ClientFoundRows, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Collation
+ case "collation":
+ cfg.Collation = value
+
+ case "columnsWithAlias":
+ var isBool bool
+ cfg.ColumnsWithAlias, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Compression
+ case "compress":
+ return errors.New("compression not implemented yet")
+
+ // Enable client side placeholder substitution
+ case "interpolateParams":
+ var isBool bool
+ cfg.InterpolateParams, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Time Location
+ case "loc":
+ if value, err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ cfg.Loc, err = time.LoadLocation(value)
+ if err != nil {
+ return
+ }
+
+ // multiple statements in one query
+ case "multiStatements":
+ var isBool bool
+ cfg.MultiStatements, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // time.Time parsing
+ case "parseTime":
+ var isBool bool
+ cfg.ParseTime, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // I/O read Timeout
+ case "readTimeout":
+ cfg.ReadTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // Reject read-only connections
+ case "rejectReadOnly":
+ var isBool bool
+ cfg.RejectReadOnly, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Server public key
+ case "serverPubKey":
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for server pub key name: %v", err)
+ }
+ cfg.ServerPubKey = name
+
+ // Strict mode
+ case "strict":
+ panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
+
+ // Dial Timeout
+ case "timeout":
+ cfg.Timeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // TLS-Encryption
+ case "tls":
+ boolValue, isBool := readBool(value)
+ if isBool {
+ if boolValue {
+ cfg.TLSConfig = "true"
+ } else {
+ cfg.TLSConfig = "false"
+ }
+ } else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" {
+ cfg.TLSConfig = vl
+ } else {
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for TLS config name: %v", err)
+ }
+ cfg.TLSConfig = name
+ }
+
+ // I/O write Timeout
+ case "writeTimeout":
+ cfg.WriteTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+ case "maxAllowedPacket":
+ cfg.MaxAllowedPacket, err = strconv.Atoi(value)
+ if err != nil {
+ return
+ }
+ default:
+ // lazy init
+ if cfg.Params == nil {
+ cfg.Params = make(map[string]string)
+ }
+
+ if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func ensureHavePort(addr string) string {
+ if _, _, err := net.SplitHostPort(addr); err != nil {
+ return net.JoinHostPort(addr, "3306")
+ }
+ return addr
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
new file mode 100644
index 0000000..ff9a8f0
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -0,0 +1,77 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "os"
+)
+
+// Various errors the driver might return. Can change between driver versions.
+var (
+ ErrInvalidConn = errors.New("invalid connection")
+ ErrMalformPkt = errors.New("malformed packet")
+ ErrNoTLS = errors.New("TLS requested but server does not support TLS")
+ ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
+ ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
+ ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
+ ErrPktSync = errors.New("commands out of sync. You can't run this command now")
+ ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
+ ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the `Config.MaxAllowedPacket`")
+ ErrBusyBuffer = errors.New("busy buffer")
+
+ // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
+ // to trigger a resend.
+ // See https://github.com/go-sql-driver/mysql/pull/302
+ errBadConnNoWrite = errors.New("bad connection")
+)
+
+var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
+
+// Logger is used to log critical error messages.
+type Logger interface {
+ Print(v ...interface{})
+}
+
+// SetLogger is used to set the logger for critical errors.
+// The initial logger is os.Stderr.
+func SetLogger(logger Logger) error {
+ if logger == nil {
+ return errors.New("logger is nil")
+ }
+ errLog = logger
+ return nil
+}
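+
+// Illustrative sketch (not part of the driver): routing the driver's error
+// log through an application logger. The prefix and destination are
+// placeholders for illustration.
+//
+//    _ = mysql.SetLogger(log.New(os.Stdout, "app-mysql: ", log.LstdFlags))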
+
+// MySQLError is an error type which represents a single MySQL error
+type MySQLError struct {
+ Number uint16
+ SQLState [5]byte
+ Message string
+}
+
+func (me *MySQLError) Error() string {
+ if me.SQLState != [5]byte{} {
+ return fmt.Sprintf("Error %d (%s): %s", me.Number, me.SQLState, me.Message)
+ }
+
+ return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
+}
+
+func (me *MySQLError) Is(err error) bool {
+ if merr, ok := err.(*MySQLError); ok {
+ return merr.Number == me.Number
+ }
+ return false
+}
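+
+// Illustrative sketch (not part of the driver): inspecting the server error
+// number behind a query failure, e.g. 1062 (duplicate key):
+//
+//    var merr *mysql.MySQLError
+//    if errors.As(err, &merr) && merr.Number == 1062 {
+//        // handle duplicate-key conflict
+//    }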
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
new file mode 100644
index 0000000..e0654a8
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -0,0 +1,206 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql"
+ "reflect"
+)
+
+func (mf *mysqlField) typeDatabaseName() string {
+ switch mf.fieldType {
+ case fieldTypeBit:
+ return "BIT"
+ case fieldTypeBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TEXT"
+ }
+ return "BLOB"
+ case fieldTypeDate:
+ return "DATE"
+ case fieldTypeDateTime:
+ return "DATETIME"
+ case fieldTypeDecimal:
+ return "DECIMAL"
+ case fieldTypeDouble:
+ return "DOUBLE"
+ case fieldTypeEnum:
+ return "ENUM"
+ case fieldTypeFloat:
+ return "FLOAT"
+ case fieldTypeGeometry:
+ return "GEOMETRY"
+ case fieldTypeInt24:
+ return "MEDIUMINT"
+ case fieldTypeJSON:
+ return "JSON"
+ case fieldTypeLong:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED INT"
+ }
+ return "INT"
+ case fieldTypeLongBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "LONGTEXT"
+ }
+ return "LONGBLOB"
+ case fieldTypeLongLong:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED BIGINT"
+ }
+ return "BIGINT"
+ case fieldTypeMediumBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "MEDIUMTEXT"
+ }
+ return "MEDIUMBLOB"
+ case fieldTypeNewDate:
+ return "DATE"
+ case fieldTypeNewDecimal:
+ return "DECIMAL"
+ case fieldTypeNULL:
+ return "NULL"
+ case fieldTypeSet:
+ return "SET"
+ case fieldTypeShort:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED SMALLINT"
+ }
+ return "SMALLINT"
+ case fieldTypeString:
+ if mf.charSet == collations[binaryCollation] {
+ return "BINARY"
+ }
+ return "CHAR"
+ case fieldTypeTime:
+ return "TIME"
+ case fieldTypeTimestamp:
+ return "TIMESTAMP"
+ case fieldTypeTiny:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED TINYINT"
+ }
+ return "TINYINT"
+ case fieldTypeTinyBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TINYTEXT"
+ }
+ return "TINYBLOB"
+ case fieldTypeVarChar:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeVarString:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeYear:
+ return "YEAR"
+ default:
+ return ""
+ }
+}
+
+var (
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
+ scanTypeUnknown = reflect.TypeOf(new(interface{}))
+)
+
+type mysqlField struct {
+ tableName string
+ name string
+ length uint32
+ flags fieldFlag
+ fieldType fieldType
+ decimals byte
+ charSet uint8
+}
+
+func (mf *mysqlField) scanType() reflect.Type {
+ switch mf.fieldType {
+ case fieldTypeTiny:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint8
+ }
+ return scanTypeInt8
+ }
+ return scanTypeNullInt
+
+ case fieldTypeShort, fieldTypeYear:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint16
+ }
+ return scanTypeInt16
+ }
+ return scanTypeNullInt
+
+ case fieldTypeInt24, fieldTypeLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint32
+ }
+ return scanTypeInt32
+ }
+ return scanTypeNullInt
+
+ case fieldTypeLongLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint64
+ }
+ return scanTypeInt64
+ }
+ return scanTypeNullInt
+
+ case fieldTypeFloat:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat32
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDouble:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat64
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
+ fieldTypeTime:
+ return scanTypeRawBytes
+
+ case fieldTypeDate, fieldTypeNewDate,
+ fieldTypeTimestamp, fieldTypeDateTime:
+ // NullTime is always returned for more consistent behavior as it can
+ // handle both cases of parseTime regardless if the field is nullable.
+ return scanTypeNullTime
+
+ default:
+ return scanTypeUnknown
+ }
+}
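
Usage sketch (not part of the vendored file): typeDatabaseName and scanType above surface through database/sql's ColumnTypes. The query and DSN are illustrative.

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT id, name, created_at FROM users LIMIT 1")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	cols, err := rows.ColumnTypes()
	if err != nil {
		log.Fatal(err)
	}
	for _, ct := range cols {
		nullable, _ := ct.Nullable()
		// DatabaseTypeName maps to typeDatabaseName (e.g. "BIGINT", "VARCHAR");
		// ScanType maps to scanType (e.g. int64, sql.RawBytes).
		log.Printf("%s: %s scan=%v nullable=%v",
			ct.Name(), ct.DatabaseTypeName(), ct.ScanType(), nullable)
	}
}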
diff --git a/vendor/github.com/go-sql-driver/mysql/fuzz.go b/vendor/github.com/go-sql-driver/mysql/fuzz.go
new file mode 100644
index 0000000..3a4ec25
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fuzz.go
@@ -0,0 +1,25 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
+//
+// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//go:build gofuzz
+// +build gofuzz
+
+package mysql
+
+import (
+ "database/sql"
+)
+
+func Fuzz(data []byte) int {
+ db, err := sql.Open("mysql", string(data))
+ if err != nil {
+ return 0
+ }
+ db.Close()
+ return 1
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
new file mode 100644
index 0000000..3279dcf
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -0,0 +1,182 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+)
+
+var (
+ fileRegister map[string]bool
+ fileRegisterLock sync.RWMutex
+ readerRegister map[string]func() io.Reader
+ readerRegisterLock sync.RWMutex
+)
+
+// RegisterLocalFile adds the given file to the file allowlist,
+// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>".
+// Alternatively you can allow the use of all local files with
+// the DSN parameter 'allowAllFiles=true'
+//
+// filePath := "/home/gopher/data.csv"
+// mysql.RegisterLocalFile(filePath)
+// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
+// if err != nil {
+// ...
+func RegisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ // lazy map init
+ if fileRegister == nil {
+ fileRegister = make(map[string]bool)
+ }
+
+ fileRegister[strings.Trim(filePath, `"`)] = true
+ fileRegisterLock.Unlock()
+}
+
+// DeregisterLocalFile removes the given filepath from the allowlist.
+func DeregisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ delete(fileRegister, strings.Trim(filePath, `"`))
+ fileRegisterLock.Unlock()
+}
+
+// RegisterReaderHandler registers a handler function which is used
+// to receive an io.Reader.
+// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>".
+// If the handler returns an io.ReadCloser, Close() is called when the
+// request is finished.
+//
+// mysql.RegisterReaderHandler("data", func() io.Reader {
+// var csvReader io.Reader // Some Reader that returns CSV data
+// ... // Open Reader here
+// return csvReader
+// })
+// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
+// if err != nil {
+// ...
+func RegisterReaderHandler(name string, handler func() io.Reader) {
+ readerRegisterLock.Lock()
+ // lazy map init
+ if readerRegister == nil {
+ readerRegister = make(map[string]func() io.Reader)
+ }
+
+ readerRegister[name] = handler
+ readerRegisterLock.Unlock()
+}
+
+// DeregisterReaderHandler removes the ReaderHandler function with
+// the given name from the registry.
+func DeregisterReaderHandler(name string) {
+ readerRegisterLock.Lock()
+ delete(readerRegister, name)
+ readerRegisterLock.Unlock()
+}
+
+func deferredClose(err *error, closer io.Closer) {
+ closeErr := closer.Close()
+ if *err == nil {
+ *err = closeErr
+ }
+}
+
+const defaultPacketSize = 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
+
+func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+ var rdr io.Reader
+ var data []byte
+ packetSize := defaultPacketSize
+ if mc.maxWriteSize < packetSize {
+ packetSize = mc.maxWriteSize
+ }
+
+ if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
+		// The server might return an absolute path. See issue #355.
+ name = name[idx+8:]
+
+ readerRegisterLock.RLock()
+ handler, inMap := readerRegister[name]
+ readerRegisterLock.RUnlock()
+
+ if inMap {
+ rdr = handler()
+ if rdr != nil {
+ if cl, ok := rdr.(io.Closer); ok {
+ defer deferredClose(&err, cl)
+ }
+ } else {
+				err = fmt.Errorf("Reader '%s' is <nil>", name)
+ }
+ } else {
+ err = fmt.Errorf("Reader '%s' is not registered", name)
+ }
+ } else { // File
+ name = strings.Trim(name, `"`)
+ fileRegisterLock.RLock()
+ fr := fileRegister[name]
+ fileRegisterLock.RUnlock()
+ if mc.cfg.AllowAllFiles || fr {
+ var file *os.File
+ var fi os.FileInfo
+
+ if file, err = os.Open(name); err == nil {
+ defer deferredClose(&err, file)
+
+ // get file size
+ if fi, err = file.Stat(); err == nil {
+ rdr = file
+ if fileSize := int(fi.Size()); fileSize < packetSize {
+ packetSize = fileSize
+ }
+ }
+ }
+ } else {
+ err = fmt.Errorf("local file '%s' is not registered", name)
+ }
+ }
+
+ // send content packets
+ // if packetSize == 0, the Reader contains no data
+ if err == nil && packetSize > 0 {
+ data := make([]byte, 4+packetSize)
+ var n int
+ for err == nil {
+ n, err = rdr.Read(data[4:])
+ if n > 0 {
+ if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+ return ioErr
+ }
+ }
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ }
+
+ // send empty packet (termination)
+ if data == nil {
+ data = make([]byte, 4)
+ }
+ if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+ return ioErr
+ }
+
+ // read OK packet
+ if err == nil {
+ return mc.readResultOK()
+ }
+
+ mc.readPacket()
+ return err
+}
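
Usage sketch (not part of the vendored file), assuming the server permits LOAD DATA LOCAL INFILE: streaming data into a table through a registered reader handler. The handler name, DSN and table are illustrative.

package main

import (
	"database/sql"
	"io"
	"log"
	"strings"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// The handler is looked up by name when the server requests the data.
	mysql.RegisterReaderHandler("users", func() io.Reader {
		return strings.NewReader("1,alice\n2,bob\n")
	})
	defer mysql.DeregisterReaderHandler("users")

	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	_, err = db.Exec("LOAD DATA LOCAL INFILE 'Reader::users' INTO TABLE users" +
		" FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\n'")
	if err != nil {
		log.Fatal(err)
	}
}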
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go
new file mode 100644
index 0000000..36c8a42
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go
@@ -0,0 +1,71 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "fmt"
+ "time"
+)
+
+// NullTime represents a time.Time that may be NULL.
+// NullTime implements the Scanner interface so
+// it can be used as a scan destination:
+//
+// var nt NullTime
+// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
+// ...
+// if nt.Valid {
+// // use nt.Time
+// } else {
+// // NULL value
+// }
+//
+// # This NullTime implementation is not driver-specific
+//
+// Deprecated: NullTime doesn't honor the loc DSN parameter.
+// NullTime.Scan interprets a time as UTC, not the loc DSN parameter.
+// Use sql.NullTime instead.
+type NullTime sql.NullTime
+
+// Scan implements the Scanner interface.
+// The value type must be time.Time or string / []byte (formatted time-string),
+// otherwise Scan fails.
+func (nt *NullTime) Scan(value interface{}) (err error) {
+ if value == nil {
+ nt.Time, nt.Valid = time.Time{}, false
+ return
+ }
+
+ switch v := value.(type) {
+ case time.Time:
+ nt.Time, nt.Valid = v, true
+ return
+ case []byte:
+ nt.Time, err = parseDateTime(v, time.UTC)
+ nt.Valid = (err == nil)
+ return
+ case string:
+ nt.Time, err = parseDateTime([]byte(v), time.UTC)
+ nt.Valid = (err == nil)
+ return
+ }
+
+ nt.Valid = false
+ return fmt.Errorf("Can't convert %T to time.Time", value)
+}
+
+// Value implements the driver Valuer interface.
+func (nt NullTime) Value() (driver.Value, error) {
+ if !nt.Valid {
+ return nil, nil
+ }
+ return nt.Time, nil
+}
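
Usage sketch (not part of the vendored file): since the NullTime above is deprecated, scanning a nullable DATETIME is typically done with sql.NullTime plus the parseTime DSN parameter. Column and table names are illustrative.

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// parseTime=true makes the driver return time.Time values; loc controls
	// the location they are interpreted in.
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test?parseTime=true&loc=UTC")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var deletedAt sql.NullTime
	err = db.QueryRow("SELECT deleted_at FROM users WHERE id = ?", 1).Scan(&deletedAt)
	if err != nil {
		log.Fatal(err)
	}
	if deletedAt.Valid {
		log.Println("deleted at", deletedAt.Time)
	} else {
		log.Println("not deleted (NULL)")
	}
}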
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
new file mode 100644
index 0000000..ee05c95
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -0,0 +1,1349 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/tls"
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "time"
+)
+
+// Packets documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+// Read packet to buffer 'data'
+func (mc *mysqlConn) readPacket() ([]byte, error) {
+ var prevData []byte
+ for {
+ // read packet header
+ data, err := mc.buf.readNext(4)
+ if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
+ errLog.Print(err)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ // packet length [24 bit]
+ pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
+
+ // check packet sync [8 bit]
+ if data[3] != mc.sequence {
+ if data[3] > mc.sequence {
+ return nil, ErrPktSyncMul
+ }
+ return nil, ErrPktSync
+ }
+ mc.sequence++
+
+ // packets with length 0 terminate a previous packet which is a
+ // multiple of (2^24)-1 bytes long
+ if pktLen == 0 {
+ // there was no previous packet
+ if prevData == nil {
+ errLog.Print(ErrMalformPkt)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ return prevData, nil
+ }
+
+ // read packet body [pktLen bytes]
+ data, err = mc.buf.readNext(pktLen)
+ if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
+ errLog.Print(err)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ // return data if this was the last packet
+ if pktLen < maxPacketSize {
+ // zero allocations for non-split packets
+ if prevData == nil {
+ return data, nil
+ }
+
+ return append(prevData, data...), nil
+ }
+
+ prevData = append(prevData, data...)
+ }
+}
+
+// Write packet buffer 'data'
+func (mc *mysqlConn) writePacket(data []byte) error {
+ pktLen := len(data) - 4
+
+ if pktLen > mc.maxAllowedPacket {
+ return ErrPktTooLarge
+ }
+
+ // Perform a stale connection check. We only perform this check for
+ // the first query on a connection that has been checked out of the
+ // connection pool: a fresh connection from the pool is more likely
+ // to be stale, and it has not performed any previous writes that
+ // could cause data corruption, so it's safe to return ErrBadConn
+ // if the check fails.
+ if mc.reset {
+ mc.reset = false
+ conn := mc.netConn
+ if mc.rawConn != nil {
+ conn = mc.rawConn
+ }
+ var err error
+ if mc.cfg.CheckConnLiveness {
+ if mc.cfg.ReadTimeout != 0 {
+ err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
+ }
+ if err == nil {
+ err = connCheck(conn)
+ }
+ }
+ if err != nil {
+ errLog.Print("closing bad idle connection: ", err)
+ mc.Close()
+ return driver.ErrBadConn
+ }
+ }
+
+ for {
+ var size int
+ if pktLen >= maxPacketSize {
+ data[0] = 0xff
+ data[1] = 0xff
+ data[2] = 0xff
+ size = maxPacketSize
+ } else {
+ data[0] = byte(pktLen)
+ data[1] = byte(pktLen >> 8)
+ data[2] = byte(pktLen >> 16)
+ size = pktLen
+ }
+ data[3] = mc.sequence
+
+ // Write packet
+ if mc.writeTimeout > 0 {
+ if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
+ return err
+ }
+ }
+
+ n, err := mc.netConn.Write(data[:4+size])
+ if err == nil && n == 4+size {
+ mc.sequence++
+ if size != maxPacketSize {
+ return nil
+ }
+ pktLen -= size
+ data = data[size:]
+ continue
+ }
+
+ // Handle error
+ if err == nil { // n != len(data)
+ mc.cleanup()
+ errLog.Print(ErrMalformPkt)
+ } else {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return cerr
+ }
+ if n == 0 && pktLen == len(data)-4 {
+ // only for the first loop iteration when nothing was written yet
+ return errBadConnNoWrite
+ }
+ mc.cleanup()
+ errLog.Print(err)
+ }
+ return ErrInvalidConn
+ }
+}
+
+/******************************************************************************
+* Initialization Process *
+******************************************************************************/
+
+// Handshake Initialization Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
+func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
+ data, err = mc.readPacket()
+ if err != nil {
+ // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
+ // in connection initialization we don't risk retrying non-idempotent actions.
+ if err == ErrInvalidConn {
+ return nil, "", driver.ErrBadConn
+ }
+ return
+ }
+
+ if data[0] == iERR {
+ return nil, "", mc.handleErrorPacket(data)
+ }
+
+ // protocol version [1 byte]
+ if data[0] < minProtocolVersion {
+ return nil, "", fmt.Errorf(
+ "unsupported protocol version %d. Version %d or higher is required",
+ data[0],
+ minProtocolVersion,
+ )
+ }
+
+ // server version [null terminated string]
+ // connection id [4 bytes]
+ pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
+
+ // first part of the password cipher [8 bytes]
+ authData := data[pos : pos+8]
+
+ // (filler) always 0x00 [1 byte]
+ pos += 8 + 1
+
+ // capability flags (lower 2 bytes) [2 bytes]
+ mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ if mc.flags&clientProtocol41 == 0 {
+ return nil, "", ErrOldProtocol
+ }
+ if mc.flags&clientSSL == 0 && mc.cfg.TLS != nil {
+ if mc.cfg.AllowFallbackToPlaintext {
+ mc.cfg.TLS = nil
+ } else {
+ return nil, "", ErrNoTLS
+ }
+ }
+ pos += 2
+
+ if len(data) > pos {
+ // character set [1 byte]
+ // status flags [2 bytes]
+ // capability flags (upper 2 bytes) [2 bytes]
+ // length of auth-plugin-data [1 byte]
+ // reserved (all [00]) [10 bytes]
+ pos += 1 + 2 + 2 + 1 + 10
+
+		// second part of the password cipher [minimum 13 bytes],
+ // where len=MAX(13, length of auth-plugin-data - 8)
+ //
+ // The web documentation is ambiguous about the length. However,
+ // according to mysql-5.7/sql/auth/sql_authentication.cc line 538,
+ // the 13th byte is "\0 byte, terminating the second part of
+ // a scramble". So the second part of the password cipher is
+ // a NULL terminated string that's at least 13 bytes with the
+ // last byte being NULL.
+ //
+ // The official Python library uses the fixed length 12
+ // which seems to work but technically could have a hidden bug.
+ authData = append(authData, data[pos:pos+12]...)
+ pos += 13
+
+ // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
+ // \NUL otherwise
+ if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
+ plugin = string(data[pos : pos+end])
+ } else {
+ plugin = string(data[pos:])
+ }
+
+ // make a memory safe copy of the cipher slice
+ var b [20]byte
+ copy(b[:], authData)
+ return b[:], plugin, nil
+ }
+
+ // make a memory safe copy of the cipher slice
+ var b [8]byte
+ copy(b[:], authData)
+ return b[:], plugin, nil
+}
+
+// Client Authentication Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
+func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error {
+ // Adjust client flags based on server support
+ clientFlags := clientProtocol41 |
+ clientSecureConn |
+ clientLongPassword |
+ clientTransactions |
+ clientLocalFiles |
+ clientPluginAuth |
+ clientMultiResults |
+ mc.flags&clientLongFlag
+
+ if mc.cfg.ClientFoundRows {
+ clientFlags |= clientFoundRows
+ }
+
+ // To enable TLS / SSL
+ if mc.cfg.TLS != nil {
+ clientFlags |= clientSSL
+ }
+
+ if mc.cfg.MultiStatements {
+ clientFlags |= clientMultiStatements
+ }
+
+ // encode length of the auth plugin data
+ var authRespLEIBuf [9]byte
+ authRespLen := len(authResp)
+ authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen))
+ if len(authRespLEI) > 1 {
+ // if the length can not be written in 1 byte, it must be written as a
+ // length encoded integer
+ clientFlags |= clientPluginAuthLenEncClientData
+ }
+
+ pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
+
+ // To specify a db name
+ if n := len(mc.cfg.DBName); n > 0 {
+ clientFlags |= clientConnectWithDB
+ pktLen += n + 1
+ }
+
+ // Calculate packet length and get buffer with that size
+ data, err := mc.buf.takeSmallBuffer(pktLen + 4)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // ClientFlags [32 bit]
+ data[4] = byte(clientFlags)
+ data[5] = byte(clientFlags >> 8)
+ data[6] = byte(clientFlags >> 16)
+ data[7] = byte(clientFlags >> 24)
+
+ // MaxPacketSize [32 bit] (none)
+ data[8] = 0x00
+ data[9] = 0x00
+ data[10] = 0x00
+ data[11] = 0x00
+
+ // Charset [1 byte]
+ var found bool
+ data[12], found = collations[mc.cfg.Collation]
+ if !found {
+ // Note possibility for false negatives:
+ // could be triggered although the collation is valid if the
+ // collations map does not contain entries the server supports.
+ return errors.New("unknown collation")
+ }
+
+ // Filler [23 bytes] (all 0x00)
+ pos := 13
+ for ; pos < 13+23; pos++ {
+ data[pos] = 0
+ }
+
+ // SSL Connection Request Packet
+ // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
+ if mc.cfg.TLS != nil {
+ // Send TLS / SSL request packet
+ if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil {
+ return err
+ }
+
+ // Switch to TLS
+ tlsConn := tls.Client(mc.netConn, mc.cfg.TLS)
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ mc.rawConn = mc.netConn
+ mc.netConn = tlsConn
+ mc.buf.nc = tlsConn
+ }
+
+ // User [null terminated string]
+ if len(mc.cfg.User) > 0 {
+ pos += copy(data[pos:], mc.cfg.User)
+ }
+ data[pos] = 0x00
+ pos++
+
+ // Auth Data [length encoded integer]
+ pos += copy(data[pos:], authRespLEI)
+ pos += copy(data[pos:], authResp)
+
+ // Databasename [null terminated string]
+ if len(mc.cfg.DBName) > 0 {
+ pos += copy(data[pos:], mc.cfg.DBName)
+ data[pos] = 0x00
+ pos++
+ }
+
+ pos += copy(data[pos:], plugin)
+ data[pos] = 0x00
+ pos++
+
+ // Send Auth packet
+ return mc.writePacket(data[:pos])
+}
+
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
+ pktLen := 4 + len(authData)
+ data, err := mc.buf.takeSmallBuffer(pktLen)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add the auth data [EOF]
+ copy(data[4:], authData)
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Command Packets *
+******************************************************************************/
+
+func (mc *mysqlConn) writeCommandPacket(command byte) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data, err := mc.buf.takeSmallBuffer(4 + 1)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ pktLen := 1 + len(arg)
+ data, err := mc.buf.takeBuffer(pktLen + 4)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg
+ copy(data[5:], arg)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg [32 bit]
+ data[5] = byte(arg)
+ data[6] = byte(arg >> 8)
+ data[7] = byte(arg >> 16)
+ data[8] = byte(arg >> 24)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Result Packets *
+******************************************************************************/
+
+func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, "", err
+ }
+
+ // packet indicator
+ switch data[0] {
+
+ case iOK:
+ return nil, "", mc.handleOkPacket(data)
+
+ case iAuthMoreData:
+ return data[1:], "", err
+
+ case iEOF:
+ if len(data) == 1 {
+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+ return nil, "mysql_old_password", nil
+ }
+ pluginEndIndex := bytes.IndexByte(data, 0x00)
+ if pluginEndIndex < 0 {
+ return nil, "", ErrMalformPkt
+ }
+ plugin := string(data[1:pluginEndIndex])
+ authData := data[pluginEndIndex+1:]
+ return authData, plugin, nil
+
+ default: // Error otherwise
+ return nil, "", mc.handleErrorPacket(data)
+ }
+}
+
+// Returns error if Packet is not a 'Result OK'-Packet
+func (mc *mysqlConn) readResultOK() error {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ if data[0] == iOK {
+ return mc.handleOkPacket(data)
+ }
+ return mc.handleErrorPacket(data)
+}
+
+// Result Set Header Packet
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
+func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
+ data, err := mc.readPacket()
+ if err == nil {
+ switch data[0] {
+
+ case iOK:
+ return 0, mc.handleOkPacket(data)
+
+ case iERR:
+ return 0, mc.handleErrorPacket(data)
+
+ case iLocalInFile:
+ return 0, mc.handleInFileRequest(string(data[1:]))
+ }
+
+ // column count
+ num, _, n := readLengthEncodedInteger(data)
+ if n-len(data) == 0 {
+ return int(num), nil
+ }
+
+ return 0, ErrMalformPkt
+ }
+ return 0, err
+}
+
+// Error Packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet
+func (mc *mysqlConn) handleErrorPacket(data []byte) error {
+ if data[0] != iERR {
+ return ErrMalformPkt
+ }
+
+ // 0xff [1 byte]
+
+ // Error Number [16 bit uint]
+ errno := binary.LittleEndian.Uint16(data[1:3])
+
+ // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
+ // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
+ if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+ // Oops; we are connected to a read-only connection, and won't be able
+ // to issue any write statements. Since RejectReadOnly is configured,
+ // we throw away this connection hoping this one would have write
+ // permission. This is specifically for a possible race condition
+ // during failover (e.g. on AWS Aurora). See README.md for more.
+ //
+ // We explicitly close the connection before returning
+ // driver.ErrBadConn to ensure that `database/sql` purges this
+ // connection and initiates a new one for next statement next time.
+ mc.Close()
+ return driver.ErrBadConn
+ }
+
+ me := &MySQLError{Number: errno}
+
+ pos := 3
+
+ // SQL State [optional: # + 5bytes string]
+ if data[3] == 0x23 {
+ copy(me.SQLState[:], data[4:4+5])
+ pos = 9
+ }
+
+ // Error Message [string]
+ me.Message = string(data[pos:])
+
+ return me
+}
+
+func readStatus(b []byte) statusFlag {
+ return statusFlag(b[0]) | statusFlag(b[1])<<8
+}
+
+// Ok Packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
+func (mc *mysqlConn) handleOkPacket(data []byte) error {
+ var n, m int
+
+ // 0x00 [1 byte]
+
+ // Affected rows [Length Coded Binary]
+ mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+
+ // Insert id [Length Coded Binary]
+ mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+ // server_status [2 bytes]
+ mc.status = readStatus(data[1+n+m : 1+n+m+2])
+ if mc.status&statusMoreResultsExists != 0 {
+ return nil
+ }
+
+ // warning count [2 bytes]
+
+ return nil
+}
+
+// Read Packets as Field Packets until EOF-Packet or an Error appears
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41
+func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
+ columns := make([]mysqlField, count)
+
+ for i := 0; ; i++ {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && (len(data) == 5 || len(data) == 1) {
+ if i == count {
+ return columns, nil
+ }
+ return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
+ }
+
+ // Catalog
+ pos, err := skipLengthEncodedString(data)
+ if err != nil {
+ return nil, err
+ }
+
+ // Database [len coded string]
+ n, err := skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Table [len coded string]
+ if mc.cfg.ColumnsWithAlias {
+ tableName, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ columns[i].tableName = string(tableName)
+ } else {
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ }
+
+ // Original table [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Name [len coded string]
+ name, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ columns[i].name = string(name)
+ pos += n
+
+ // Original name [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Filler [uint8]
+ pos++
+
+ // Charset [charset, collation uint8]
+ columns[i].charSet = data[pos]
+ pos += 2
+
+ // Length [uint32]
+ columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
+ pos += 4
+
+ // Field type [uint8]
+ columns[i].fieldType = fieldType(data[pos])
+ pos++
+
+ // Flags [uint16]
+ columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ pos += 2
+
+ // Decimals [uint8]
+ columns[i].decimals = data[pos]
+ //pos++
+
+ // Default value [len coded binary]
+ //if pos < len(data) {
+ // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:])
+ //}
+ }
+}
+
+// Read Packets as Field Packets until EOF-Packet or an Error appears
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow
+func (rows *textRows) readRow(dest []driver.Value) error {
+ mc := rows.mc
+
+ if rows.rs.done {
+ return io.EOF
+ }
+
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ // server_status [2 bytes]
+ rows.mc.status = readStatus(data[3:])
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ }
+ return io.EOF
+ }
+ if data[0] == iERR {
+ rows.mc = nil
+ return mc.handleErrorPacket(data)
+ }
+
+ // RowSet Packet
+ var (
+ n int
+ isNull bool
+ pos int = 0
+ )
+
+ for i := range dest {
+ // Read bytes and convert to string
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+
+ if err != nil {
+ return err
+ }
+
+ if isNull {
+ dest[i] = nil
+ continue
+ }
+
+ if !mc.parseTime {
+ continue
+ }
+
+ // Parse time field
+ switch rows.rs.columns[i].fieldType {
+ case fieldTypeTimestamp,
+ fieldTypeDateTime,
+ fieldTypeDate,
+ fieldTypeNewDate:
+ if dest[i], err = parseDateTime(dest[i].([]byte), mc.cfg.Loc); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read
+func (mc *mysqlConn) readUntilEOF() error {
+ for {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ switch data[0] {
+ case iERR:
+ return mc.handleErrorPacket(data)
+ case iEOF:
+ if len(data) == 5 {
+ mc.status = readStatus(data[3:])
+ }
+ return nil
+ }
+ }
+}
+
+/******************************************************************************
+* Prepared Statements *
+******************************************************************************/
+
+// Prepare Result Packets
+// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html
+func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
+ data, err := stmt.mc.readPacket()
+ if err == nil {
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ return 0, stmt.mc.handleErrorPacket(data)
+ }
+
+ // statement id [4 bytes]
+ stmt.id = binary.LittleEndian.Uint32(data[1:5])
+
+ // Column count [16 bit uint]
+ columnCount := binary.LittleEndian.Uint16(data[5:7])
+
+ // Param count [16 bit uint]
+ stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9]))
+
+ // Reserved [8 bit]
+
+ // Warning count [16 bit uint]
+
+ return columnCount, nil
+ }
+ return 0, err
+}
+
+// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
+func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
+ maxLen := stmt.mc.maxAllowedPacket - 1
+ pktLen := maxLen
+
+	// After the 4-byte packet header, the payload starts with:
+ // 1 byte command
+ // 4 bytes stmtID
+ // 2 bytes paramID
+ const dataOffset = 1 + 4 + 2
+
+ // Cannot use the write buffer since
+ // a) the buffer is too small
+ // b) it is in use
+ data := make([]byte, 4+1+4+2+len(arg))
+
+ copy(data[4+dataOffset:], arg)
+
+ for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset {
+ if dataOffset+argLen < maxLen {
+ pktLen = dataOffset + argLen
+ }
+
+ stmt.mc.sequence = 0
+ // Add command byte [1 byte]
+ data[4] = comStmtSendLongData
+
+ // Add stmtID [32 bit]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // Add paramID [16 bit]
+ data[9] = byte(paramID)
+ data[10] = byte(paramID >> 8)
+
+ // Send CMD packet
+ err := stmt.mc.writePacket(data[:4+pktLen])
+ if err == nil {
+ data = data[pktLen-dataOffset:]
+ continue
+ }
+ return err
+
+ }
+
+ // Reset Packet Sequence
+ stmt.mc.sequence = 0
+ return nil
+}
+
+// Execute Prepared Statement
+// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html
+func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
+ if len(args) != stmt.paramCount {
+ return fmt.Errorf(
+ "argument count mismatch (got: %d; has: %d)",
+ len(args),
+ stmt.paramCount,
+ )
+ }
+
+ const minPktLen = 4 + 1 + 4 + 1 + 4
+ mc := stmt.mc
+
+ // Determine threshold dynamically to avoid packet size shortage.
+ longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
+ if longDataSize < 64 {
+ longDataSize = 64
+ }
+
+ // Reset packet-sequence
+ mc.sequence = 0
+
+ var data []byte
+ var err error
+
+ if len(args) == 0 {
+ data, err = mc.buf.takeBuffer(minPktLen)
+ } else {
+ data, err = mc.buf.takeCompleteBuffer()
+ // In this case the len(data) == cap(data) which is used to optimise the flow below.
+ }
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // command [1 byte]
+ data[4] = comStmtExecute
+
+ // statement_id [4 bytes]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
+ data[9] = 0x00
+
+ // iteration_count (uint32(1)) [4 bytes]
+ data[10] = 0x01
+ data[11] = 0x00
+ data[12] = 0x00
+ data[13] = 0x00
+
+ if len(args) > 0 {
+ pos := minPktLen
+
+ var nullMask []byte
+ if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) {
+			// The buffer has to be extended, but we don't know by how much, so
+			// we fall back to append once all data with known sizes has been written.
+			// We stop there because the number of columns involved makes the
+			// required allocation size hard to guess.
+ tmp := make([]byte, pos+maskLen+typesLen)
+ copy(tmp[:pos], data[:pos])
+ data = tmp
+ nullMask = data[pos : pos+maskLen]
+ // No need to clean nullMask as make ensures that.
+ pos += maskLen
+ } else {
+ nullMask = data[pos : pos+maskLen]
+ for i := range nullMask {
+ nullMask[i] = 0
+ }
+ pos += maskLen
+ }
+
+ // newParameterBoundFlag 1 [1 byte]
+ data[pos] = 0x01
+ pos++
+
+ // type of each parameter [len(args)*2 bytes]
+ paramTypes := data[pos:]
+ pos += len(args) * 2
+
+ // value of each parameter [n bytes]
+ paramValues := data[pos:pos]
+ valuesCap := cap(paramValues)
+
+ for i, arg := range args {
+ // build NULL-bitmap
+ if arg == nil {
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = byte(fieldTypeNULL)
+ paramTypes[i+i+1] = 0x00
+ continue
+ }
+
+ if v, ok := arg.(json.RawMessage); ok {
+ arg = []byte(v)
+ }
+ // cache types and values
+ switch v := arg.(type) {
+ case int64:
+ paramTypes[i+i] = byte(fieldTypeLongLong)
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ uint64(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(uint64(v))...,
+ )
+ }
+
+ case uint64:
+ paramTypes[i+i] = byte(fieldTypeLongLong)
+ paramTypes[i+i+1] = 0x80 // type is unsigned
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ uint64(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(uint64(v))...,
+ )
+ }
+
+ case float64:
+ paramTypes[i+i] = byte(fieldTypeDouble)
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ math.Float64bits(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(math.Float64bits(v))...,
+ )
+ }
+
+ case bool:
+ paramTypes[i+i] = byte(fieldTypeTiny)
+ paramTypes[i+i+1] = 0x00
+
+ if v {
+ paramValues = append(paramValues, 0x01)
+ } else {
+ paramValues = append(paramValues, 0x00)
+ }
+
+ case []byte:
+ // Common case (non-nil value) first
+ if v != nil {
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < longDataSize {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, v); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+
+ // Handle []byte(nil) as a NULL value
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = byte(fieldTypeNULL)
+ paramTypes[i+i+1] = 0x00
+
+ case string:
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < longDataSize {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, []byte(v)); err != nil {
+ return err
+ }
+ }
+
+ case time.Time:
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ var a [64]byte
+ var b = a[:0]
+
+ if v.IsZero() {
+ b = append(b, "0000-00-00"...)
+ } else {
+ b, err = appendDateTime(b, v.In(mc.cfg.Loc))
+ if err != nil {
+ return err
+ }
+ }
+
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(b)),
+ )
+ paramValues = append(paramValues, b...)
+
+ default:
+ return fmt.Errorf("cannot convert type: %T", arg)
+ }
+ }
+
+ // Check if param values exceeded the available buffer
+ // In that case we must build the data packet with the new values buffer
+ if valuesCap != cap(paramValues) {
+ data = append(data[:pos], paramValues...)
+ if err = mc.buf.store(data); err != nil {
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+ }
+
+ pos += len(paramValues)
+ data = data[:pos]
+ }
+
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) discardResults() error {
+ for mc.status&statusMoreResultsExists != 0 {
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
+func (rows *binaryRows) readRow(dest []driver.Value) error {
+ data, err := rows.mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ rows.mc.status = readStatus(data[3:])
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ }
+ return io.EOF
+ }
+ mc := rows.mc
+ rows.mc = nil
+
+ // Error otherwise
+ return mc.handleErrorPacket(data)
+ }
+
+ // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
+ pos := 1 + (len(dest)+7+2)>>3
+ nullMask := data[1:pos]
+
+ for i := range dest {
+ // Field is NULL
+ // (byte >> bit-pos) % 2 == 1
+ if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 {
+ dest[i] = nil
+ continue
+ }
+
+ // Convert to byte-coded string
+ switch rows.rs.columns[i].fieldType {
+ case fieldTypeNULL:
+ dest[i] = nil
+ continue
+
+ // Numeric Types
+ case fieldTypeTiny:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(data[pos])
+ } else {
+ dest[i] = int64(int8(data[pos]))
+ }
+ pos++
+ continue
+
+ case fieldTypeShort, fieldTypeYear:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ } else {
+ dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
+ }
+ pos += 2
+ continue
+
+ case fieldTypeInt24, fieldTypeLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ } else {
+ dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ }
+ pos += 4
+ continue
+
+ case fieldTypeLongLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ val := binary.LittleEndian.Uint64(data[pos : pos+8])
+ if val > math.MaxInt64 {
+ dest[i] = uint64ToString(val)
+ } else {
+ dest[i] = int64(val)
+ }
+ } else {
+ dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ }
+ pos += 8
+ continue
+
+ case fieldTypeFloat:
+ dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ pos += 4
+ continue
+
+ case fieldTypeDouble:
+ dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ pos += 8
+ continue
+
+ // Length coded Binary Strings
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
+ var isNull bool
+ var n int
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+ if err == nil {
+ if !isNull {
+ continue
+ } else {
+ dest[i] = nil
+ continue
+ }
+ }
+ return err
+
+ case
+ fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
+ fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal]
+ fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
+
+ num, isNull, n := readLengthEncodedInteger(data[pos:])
+ pos += n
+
+ switch {
+ case isNull:
+ dest[i] = nil
+ continue
+ case rows.rs.columns[i].fieldType == fieldTypeTime:
+ // database/sql does not support an equivalent to TIME, return a string
+ var dstlen uint8
+ switch decimals := rows.rs.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 8
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 8 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.rs.columns[i].decimals,
+ )
+ }
+ dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen)
+ case rows.mc.parseTime:
+ dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
+ default:
+ var dstlen uint8
+ if rows.rs.columns[i].fieldType == fieldTypeDate {
+ dstlen = 10
+ } else {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 19
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 19 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.rs.columns[i].decimals,
+ )
+ }
+ }
+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen)
+ }
+
+ if err == nil {
+ pos += int(num)
+ continue
+ } else {
+ return err
+ }
+
+ // Please report if this happens!
+ default:
+ return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
+ }
+ }
+
+ return nil
+}
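
Illustrative sketch (not part of the vendored file): readPacket and writePacket above frame every packet with a 4-byte header consisting of a 3-byte little-endian payload length followed by a 1-byte sequence number. A standalone encoder/decoder of that header looks like this:

package main

import "fmt"

// encodeHeader builds a MySQL packet header: 3-byte little-endian length
// plus a 1-byte sequence number, exactly as writePacket fills data[0:4].
func encodeHeader(pktLen int, sequence byte) [4]byte {
	return [4]byte{
		byte(pktLen),
		byte(pktLen >> 8),
		byte(pktLen >> 16),
		sequence,
	}
}

// decodeHeader mirrors the first lines of readPacket.
func decodeHeader(hdr [4]byte) (pktLen int, sequence byte) {
	pktLen = int(uint32(hdr[0]) | uint32(hdr[1])<<8 | uint32(hdr[2])<<16)
	return pktLen, hdr[3]
}

func main() {
	hdr := encodeHeader(57, 1)
	n, seq := decodeHeader(hdr)
	fmt.Printf("header=% x len=%d seq=%d\n", hdr[:], n, seq) // header=39 00 00 01 len=57 seq=1
}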
diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
new file mode 100644
index 0000000..c6438d0
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/result.go
@@ -0,0 +1,22 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlResult struct {
+ affectedRows int64
+ insertId int64
+}
+
+func (res *mysqlResult) LastInsertId() (int64, error) {
+ return res.insertId, nil
+}
+
+func (res *mysqlResult) RowsAffected() (int64, error) {
+ return res.affectedRows, nil
+}
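
Usage sketch (not part of the vendored file): affectedRows and insertId surface through the standard sql.Result interface. The table and DSN are illustrative.

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	res, err := db.Exec("INSERT INTO users (name) VALUES (?)", "alice")
	if err != nil {
		log.Fatal(err)
	}

	id, _ := res.LastInsertId()   // mysqlResult.insertId
	rows, _ := res.RowsAffected() // mysqlResult.affectedRows
	log.Printf("inserted id=%d, rows=%d", id, rows)
}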
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
new file mode 100644
index 0000000..888bdb5
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -0,0 +1,223 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "io"
+ "math"
+ "reflect"
+)
+
+type resultSet struct {
+ columns []mysqlField
+ columnNames []string
+ done bool
+}
+
+type mysqlRows struct {
+ mc *mysqlConn
+ rs resultSet
+ finish func()
+}
+
+type binaryRows struct {
+ mysqlRows
+}
+
+type textRows struct {
+ mysqlRows
+}
+
+func (rows *mysqlRows) Columns() []string {
+ if rows.rs.columnNames != nil {
+ return rows.rs.columnNames
+ }
+
+ columns := make([]string, len(rows.rs.columns))
+ if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
+ for i := range columns {
+ if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
+ columns[i] = tableName + "." + rows.rs.columns[i].name
+ } else {
+ columns[i] = rows.rs.columns[i].name
+ }
+ }
+ } else {
+ for i := range columns {
+ columns[i] = rows.rs.columns[i].name
+ }
+ }
+
+ rows.rs.columnNames = columns
+ return columns
+}
+
+func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
+ return rows.rs.columns[i].typeDatabaseName()
+}
+
+// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
+// return int64(rows.rs.columns[i].length), true
+// }
+
+func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+ return rows.rs.columns[i].flags&flagNotNULL == 0, true
+}
+
+func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
+ column := rows.rs.columns[i]
+ decimals := int64(column.decimals)
+
+ switch column.fieldType {
+ case fieldTypeDecimal, fieldTypeNewDecimal:
+ if decimals > 0 {
+ return int64(column.length) - 2, decimals, true
+ }
+ return int64(column.length) - 1, decimals, true
+ case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
+ return decimals, decimals, true
+ case fieldTypeFloat, fieldTypeDouble:
+ if decimals == 0x1f {
+ return math.MaxInt64, math.MaxInt64, true
+ }
+ return math.MaxInt64, decimals, true
+ }
+
+ return 0, 0, false
+}
+
+func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
+ return rows.rs.columns[i].scanType()
+}
+
+func (rows *mysqlRows) Close() (err error) {
+ if f := rows.finish; f != nil {
+ f()
+ rows.finish = nil
+ }
+
+ mc := rows.mc
+ if mc == nil {
+ return nil
+ }
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // flip the buffer for this connection if we need to drain it.
+ // note that for a successful query (i.e. one where rows.next()
+ // has been called until it returns false), `rows.mc` will be nil
+ // by the time the user calls `(*Rows).Close`, so we won't reach this
+ // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
+ mc.buf.flip()
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ err = mc.readUntilEOF()
+ }
+ if err == nil {
+ if err = mc.discardResults(); err != nil {
+ return err
+ }
+ }
+
+ rows.mc = nil
+ return err
+}
+
+func (rows *mysqlRows) HasNextResultSet() (b bool) {
+ if rows.mc == nil {
+ return false
+ }
+ return rows.mc.status&statusMoreResultsExists != 0
+}
+
+func (rows *mysqlRows) nextResultSet() (int, error) {
+ if rows.mc == nil {
+ return 0, io.EOF
+ }
+ if err := rows.mc.error(); err != nil {
+ return 0, err
+ }
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ if err := rows.mc.readUntilEOF(); err != nil {
+ return 0, err
+ }
+ rows.rs.done = true
+ }
+
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ return 0, io.EOF
+ }
+ rows.rs = resultSet{}
+ return rows.mc.readResultSetHeaderPacket()
+}
+
+func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
+ for {
+ resLen, err := rows.nextResultSet()
+ if err != nil {
+ return 0, err
+ }
+
+ if resLen > 0 {
+ return resLen, nil
+ }
+
+ rows.rs.done = true
+ }
+}
+
+func (rows *binaryRows) NextResultSet() error {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *binaryRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
+
+func (rows *textRows) NextResultSet() (err error) {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *textRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
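
Usage sketch (not part of the vendored file): HasNextResultSet and nextResultSet above back database/sql's Rows.NextResultSet, which matters when multiStatements=true is set in the DSN. The statements and DSN are illustrative.

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// multiStatements=true lets one Query carry several statements.
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test?multiStatements=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT 1; SELECT 2, 3")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for set := 0; ; set++ {
		for rows.Next() {
			cols, _ := rows.Columns()
			log.Printf("result set %d has %d columns", set, len(cols))
		}
		if !rows.NextResultSet() { // advances via mysqlRows.nextResultSet
			break
		}
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}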
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
new file mode 100644
index 0000000..10ece8b
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -0,0 +1,220 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+)
+
+type mysqlStmt struct {
+ mc *mysqlConn
+ id uint32
+ paramCount int
+}
+
+func (stmt *mysqlStmt) Close() error {
+ if stmt.mc == nil || stmt.mc.closed.Load() {
+ // driver.Stmt.Close can be called more than once, thus this function
+ // has to be idempotent.
+ // See also Issue #450 and golang/go#16019.
+ //errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
+ stmt.mc = nil
+ return err
+}
+
+func (stmt *mysqlStmt) NumInput() int {
+ return stmt.paramCount
+}
+
+func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
+ return converter{}
+}
+
+func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
+
+func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
+ if stmt.mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, stmt.mc.markBadConn(err)
+ }
+
+ mc := stmt.mc
+
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ if resLen > 0 {
+ // Columns
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+
+ // Rows
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := mc.discardResults(); err != nil {
+ return nil, err
+ }
+
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, nil
+}
+
+func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
+ return stmt.query(args)
+}
+
+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+ if stmt.mc.closed.Load() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, stmt.mc.markBadConn(err)
+ }
+
+ mc := stmt.mc
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ rows := new(binaryRows)
+
+ if resLen > 0 {
+ rows.mc = mc
+ rows.rs.columns, err = mc.readColumns(resLen)
+ } else {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
+ }
+
+ return rows, err
+}
+
+var jsonType = reflect.TypeOf(json.RawMessage{})
+
+type converter struct{}
+
+// ConvertValue mirrors the reference/default converter in database/sql/driver
+// with _one_ exception: we support uint64 values with their high bit set, which
+// the default implementation does not. This function should be kept in sync with
+// database/sql/driver defaultConverter.ConvertValue() except for that
+// deliberate difference.
+func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+ if driver.IsValue(v) {
+ return v, nil
+ }
+
+ if vr, ok := v.(driver.Valuer); ok {
+ sv, err := callValuerValue(vr)
+ if err != nil {
+ return nil, err
+ }
+ if driver.IsValue(sv) {
+ return sv, nil
+ }
+ // A value returned from the Valuer interface can be "a type handled by
+ // a database driver's NamedValueChecker interface" so we should accept
+ // uint64 here as well.
+ if u, ok := sv.(uint64); ok {
+ return u, nil
+ }
+ return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
+ }
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Ptr:
+ // indirect pointers
+ if rv.IsNil() {
+ return nil, nil
+ } else {
+ return c.ConvertValue(rv.Elem().Interface())
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return rv.Uint(), nil
+ case reflect.Float32, reflect.Float64:
+ return rv.Float(), nil
+ case reflect.Bool:
+ return rv.Bool(), nil
+ case reflect.Slice:
+ switch t := rv.Type(); {
+ case t == jsonType:
+ return v, nil
+ case t.Elem().Kind() == reflect.Uint8:
+ return rv.Bytes(), nil
+ default:
+ return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, t.Elem().Kind())
+ }
+ case reflect.String:
+ return rv.String(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
+}
+
+var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// callValuerValue returns vr.Value(), with one exception:
+// If vr.Value is an auto-generated method on a pointer type and the
+// pointer is nil, it would panic at runtime in the panicwrap
+// method. Treat it like nil instead.
+//
+// This is so people can implement driver.Value on value types and
+// still use nil pointers to those types to mean nil/NULL, just like
+// string/*string.
+//
+// This is an exact copy of the same-named unexported function from the
+// database/sql package.
+func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
+ if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
+ rv.IsNil() &&
+ rv.Type().Elem().Implements(valuerReflectType) {
+ return nil, nil
+ }
+ return vr.Value()
+}
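
Usage sketch (not part of the vendored file): the converter above accepts uint64 values even when the high bit is set, which the default database/sql converter rejects. The table, column types and DSN are illustrative.

package main

import (
	"database/sql"
	"log"
	"math"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	stmt, err := db.Prepare("INSERT INTO counters (id, value) VALUES (?, ?)")
	if err != nil {
		log.Fatal(err)
	}
	defer stmt.Close()

	// A uint64 above math.MaxInt64 is passed through by this driver's
	// CheckNamedValue/ConvertValue instead of being rejected.
	if _, err := stmt.Exec(1, uint64(math.MaxUint64)); err != nil {
		log.Fatal(err)
	}
}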
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
new file mode 100644
index 0000000..4a4b610
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -0,0 +1,31 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlTx struct {
+ mc *mysqlConn
+}
+
+func (tx *mysqlTx) Commit() (err error) {
+ if tx.mc == nil || tx.mc.closed.Load() {
+ return ErrInvalidConn
+ }
+ err = tx.mc.exec("COMMIT")
+ tx.mc = nil
+ return
+}
+
+func (tx *mysqlTx) Rollback() (err error) {
+ if tx.mc == nil || tx.mc.closed.Load() {
+ return ErrInvalidConn
+ }
+ err = tx.mc.exec("ROLLBACK")
+ tx.mc = nil
+ return
+}
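
Usage sketch (not part of the vendored file): mysqlTx is what backs sql.Tx for this driver, issuing plain COMMIT/ROLLBACK statements. The statements and DSN are illustrative.

package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	tx, err := db.BeginTx(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}

	if _, err := tx.Exec("UPDATE accounts SET balance = balance - 10 WHERE id = ?", 1); err != nil {
		tx.Rollback() // issues "ROLLBACK" via mysqlTx.Rollback
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil { // issues "COMMIT" via mysqlTx.Commit
		log.Fatal(err)
	}
}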
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
new file mode 100644
index 0000000..15dbd8d
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -0,0 +1,834 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/tls"
+ "database/sql"
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Registry for custom tls.Configs
+var (
+ tlsConfigLock sync.RWMutex
+ tlsConfigRegistry map[string]*tls.Config
+)
+
+// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
+// Use the key as a value in the DSN where tls=value.
+//
+// Note: The provided tls.Config is exclusively owned by the driver after
+// registering it.
+//
+// rootCertPool := x509.NewCertPool()
+// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+// log.Fatal("Failed to append PEM.")
+// }
+// clientCert := make([]tls.Certificate, 0, 1)
+// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// clientCert = append(clientCert, certs)
+// mysql.RegisterTLSConfig("custom", &tls.Config{
+// RootCAs: rootCertPool,
+// Certificates: clientCert,
+// })
+// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
+func RegisterTLSConfig(key string, config *tls.Config) error {
+ if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" {
+ return fmt.Errorf("key '%s' is reserved", key)
+ }
+
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry == nil {
+ tlsConfigRegistry = make(map[string]*tls.Config)
+ }
+
+ tlsConfigRegistry[key] = config
+ tlsConfigLock.Unlock()
+ return nil
+}
+
+// DeregisterTLSConfig removes the tls.Config associated with key.
+func DeregisterTLSConfig(key string) {
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry != nil {
+ delete(tlsConfigRegistry, key)
+ }
+ tlsConfigLock.Unlock()
+}
+
+func getTLSConfigClone(key string) (config *tls.Config) {
+ tlsConfigLock.RLock()
+ if v, ok := tlsConfigRegistry[key]; ok {
+ config = v.Clone()
+ }
+ tlsConfigLock.RUnlock()
+ return
+}
+
+// Returns the bool value of the input.
+// The 2nd return value indicates if the input was a valid bool value
+func readBool(input string) (value bool, valid bool) {
+ switch input {
+ case "1", "true", "TRUE", "True":
+ return true, true
+ case "0", "false", "FALSE", "False":
+ return false, true
+ }
+
+ // Not a valid bool value
+ return
+}
+
+/******************************************************************************
+* Time related utils *
+******************************************************************************/
+
+func parseDateTime(b []byte, loc *time.Location) (time.Time, error) {
+ const base = "0000-00-00 00:00:00.000000"
+ switch len(b) {
+ case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
+ if string(b) == base[:len(b)] {
+ return time.Time{}, nil
+ }
+
+ year, err := parseByteYear(b)
+ if err != nil {
+ return time.Time{}, err
+ }
+ if b[4] != '-' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[4])
+ }
+
+ m, err := parseByte2Digits(b[5], b[6])
+ if err != nil {
+ return time.Time{}, err
+ }
+ month := time.Month(m)
+
+ if b[7] != '-' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[7])
+ }
+
+ day, err := parseByte2Digits(b[8], b[9])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if len(b) == 10 {
+ return time.Date(year, month, day, 0, 0, 0, 0, loc), nil
+ }
+
+ if b[10] != ' ' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[10])
+ }
+
+ hour, err := parseByte2Digits(b[11], b[12])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if b[13] != ':' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[13])
+ }
+
+ min, err := parseByte2Digits(b[14], b[15])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if b[16] != ':' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[16])
+ }
+
+ sec, err := parseByte2Digits(b[17], b[18])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if len(b) == 19 {
+ return time.Date(year, month, day, hour, min, sec, 0, loc), nil
+ }
+
+ if b[19] != '.' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[19])
+ }
+ nsec, err := parseByteNanoSec(b[20:])
+ if err != nil {
+ return time.Time{}, err
+ }
+ return time.Date(year, month, day, hour, min, sec, nsec, loc), nil
+ default:
+ return time.Time{}, fmt.Errorf("invalid time bytes: %s", b)
+ }
+}
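+
+// Illustrative note (not part of the upstream driver): parseDateTime accepts
+// "2006-01-02" (length 10), "2006-01-02 15:04:05" (length 19), and the same
+// form with 1 to 6 fractional digits (lengths 21-26), e.g.
+// "2006-01-02 15:04:05.999999"; the all-zero value decodes to the zero
+// time.Time.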
+
+func parseByteYear(b []byte) (int, error) {
+ year, n := 0, 1000
+ for i := 0; i < 4; i++ {
+ v, err := bToi(b[i])
+ if err != nil {
+ return 0, err
+ }
+ year += v * n
+ n /= 10
+ }
+ return year, nil
+}
+
+func parseByte2Digits(b1, b2 byte) (int, error) {
+ d1, err := bToi(b1)
+ if err != nil {
+ return 0, err
+ }
+ d2, err := bToi(b2)
+ if err != nil {
+ return 0, err
+ }
+ return d1*10 + d2, nil
+}
+
+func parseByteNanoSec(b []byte) (int, error) {
+ ns, digit := 0, 100000 // max is 6-digits
+ for i := 0; i < len(b); i++ {
+ v, err := bToi(b[i])
+ if err != nil {
+ return 0, err
+ }
+ ns += v * digit
+ digit /= 10
+ }
+	// The fractional part has at most 6 digits (microsecond precision),
+	// while time.Time expects nanoseconds (9 digits), so scale by 1000.
+ return ns * 1000, nil
+}
+
+func bToi(b byte) (int, error) {
+ if b < '0' || b > '9' {
+ return 0, errors.New("not [0-9]")
+ }
+ return int(b - '0'), nil
+}
+
+func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
+ switch num {
+ case 0:
+ return time.Time{}, nil
+ case 4:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ 0, 0, 0, 0,
+ loc,
+ ), nil
+ case 7:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ 0,
+ loc,
+ ), nil
+ case 11:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
+ loc,
+ ), nil
+ }
+ return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
+}
+
+func appendDateTime(buf []byte, t time.Time) ([]byte, error) {
+ year, month, day := t.Date()
+ hour, min, sec := t.Clock()
+ nsec := t.Nanosecond()
+
+ if year < 1 || year > 9999 {
+ return buf, errors.New("year is not in the range [1, 9999]: " + strconv.Itoa(year)) // use errors.New instead of fmt.Errorf to avoid year escape to heap
+ }
+ year100 := year / 100
+ year1 := year % 100
+
+ var localBuf [len("2006-01-02T15:04:05.999999999")]byte // does not escape
+ localBuf[0], localBuf[1], localBuf[2], localBuf[3] = digits10[year100], digits01[year100], digits10[year1], digits01[year1]
+ localBuf[4] = '-'
+ localBuf[5], localBuf[6] = digits10[month], digits01[month]
+ localBuf[7] = '-'
+ localBuf[8], localBuf[9] = digits10[day], digits01[day]
+
+ if hour == 0 && min == 0 && sec == 0 && nsec == 0 {
+ return append(buf, localBuf[:10]...), nil
+ }
+
+ localBuf[10] = ' '
+ localBuf[11], localBuf[12] = digits10[hour], digits01[hour]
+ localBuf[13] = ':'
+ localBuf[14], localBuf[15] = digits10[min], digits01[min]
+ localBuf[16] = ':'
+ localBuf[17], localBuf[18] = digits10[sec], digits01[sec]
+
+ if nsec == 0 {
+ return append(buf, localBuf[:19]...), nil
+ }
+ nsec100000000 := nsec / 100000000
+ nsec1000000 := (nsec / 1000000) % 100
+ nsec10000 := (nsec / 10000) % 100
+ nsec100 := (nsec / 100) % 100
+ nsec1 := nsec % 100
+ localBuf[19] = '.'
+
+ // milli second
+ localBuf[20], localBuf[21], localBuf[22] =
+ digits01[nsec100000000], digits10[nsec1000000], digits01[nsec1000000]
+ // micro second
+ localBuf[23], localBuf[24], localBuf[25] =
+ digits10[nsec10000], digits01[nsec10000], digits10[nsec100]
+ // nano second
+ localBuf[26], localBuf[27], localBuf[28] =
+ digits01[nsec100], digits10[nsec1], digits01[nsec1]
+
+ // trim trailing zeros
+ n := len(localBuf)
+ for n > 0 && localBuf[n-1] == '0' {
+ n--
+ }
+
+ return append(buf, localBuf[:n]...), nil
+}
+
+// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
+// if the DATE or DATETIME has the zero value.
+// It must never be changed.
+// The current behavior depends on database/sql copying the result.
+var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
+
+const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
+
+func appendMicrosecs(dst, src []byte, decimals int) []byte {
+ if decimals <= 0 {
+ return dst
+ }
+ if len(src) == 0 {
+ return append(dst, ".000000"[:decimals+1]...)
+ }
+
+ microsecs := binary.LittleEndian.Uint32(src[:4])
+ p1 := byte(microsecs / 10000)
+ microsecs -= 10000 * uint32(p1)
+ p2 := byte(microsecs / 100)
+ microsecs -= 100 * uint32(p2)
+ p3 := byte(microsecs)
+
+ switch decimals {
+ default:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3], digits01[p3],
+ )
+ case 1:
+ return append(dst, '.',
+ digits10[p1],
+ )
+ case 2:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ )
+ case 3:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2],
+ )
+ case 4:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ )
+ case 5:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3],
+ )
+ }
+}
+
+func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
+ // length expects the deterministic length of the zero value,
+ // negative time and 100+ hours are automatically added if needed
+ if len(src) == 0 {
+ return zeroDateTime[:length], nil
+ }
+ var dst []byte // return value
+ var p1, p2, p3 byte // current digit pair
+
+ switch length {
+ case 10, 19, 21, 22, 23, 24, 25, 26:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s length %d", t, length)
+ }
+ switch len(src) {
+ case 4, 7, 11:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
+ }
+ dst = make([]byte, 0, length)
+ // start with the date
+ year := binary.LittleEndian.Uint16(src[:2])
+ pt := year / 100
+ p1 = byte(year - 100*uint16(pt))
+ p2, p3 = src[2], src[3]
+ dst = append(dst,
+ digits10[pt], digits01[pt],
+ digits10[p1], digits01[p1], '-',
+ digits10[p2], digits01[p2], '-',
+ digits10[p3], digits01[p3],
+ )
+ if length == 10 {
+ return dst, nil
+ }
+ if len(src) == 4 {
+ return append(dst, zeroDateTime[10:length]...), nil
+ }
+ dst = append(dst, ' ')
+ p1 = src[4] // hour
+ src = src[5:]
+
+ // p1 is 2-digit hour, src is after hour
+ p2, p3 = src[0], src[1]
+ dst = append(dst,
+ digits10[p1], digits01[p1], ':',
+ digits10[p2], digits01[p2], ':',
+ digits10[p3], digits01[p3],
+ )
+ return appendMicrosecs(dst, src[2:], int(length)-20), nil
+}
+
+func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
+ // length expects the deterministic length of the zero value,
+ // negative time and 100+ hours are automatically added if needed
+ if len(src) == 0 {
+ return zeroDateTime[11 : 11+length], nil
+ }
+ var dst []byte // return value
+
+ switch length {
+ case
+ 8, // time (can be up to 10 when negative and 100+ hours)
+ 10, 11, 12, 13, 14, 15: // time with fractional seconds
+ default:
+ return nil, fmt.Errorf("illegal TIME length %d", length)
+ }
+ switch len(src) {
+ case 8, 12:
+ default:
+ return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
+ }
+ // +2 to enable negative time and 100+ hours
+ dst = make([]byte, 0, length+2)
+ if src[0] == 1 {
+ dst = append(dst, '-')
+ }
+ days := binary.LittleEndian.Uint32(src[1:5])
+ hours := int64(days)*24 + int64(src[5])
+
+ if hours >= 100 {
+ dst = strconv.AppendInt(dst, hours, 10)
+ } else {
+ dst = append(dst, digits10[hours], digits01[hours])
+ }
+
+ min, sec := src[6], src[7]
+ dst = append(dst, ':',
+ digits10[min], digits01[min], ':',
+ digits10[sec], digits01[sec],
+ )
+ return appendMicrosecs(dst, src[8:], int(length)-9), nil
+}
+
+/******************************************************************************
+* Convert from and to bytes *
+******************************************************************************/
+
+func uint64ToBytes(n uint64) []byte {
+ return []byte{
+ byte(n),
+ byte(n >> 8),
+ byte(n >> 16),
+ byte(n >> 24),
+ byte(n >> 32),
+ byte(n >> 40),
+ byte(n >> 48),
+ byte(n >> 56),
+ }
+}
+
+func uint64ToString(n uint64) []byte {
+ var a [20]byte
+ i := 20
+
+ // U+0030 = 0
+ // ...
+ // U+0039 = 9
+
+ var q uint64
+ for n >= 10 {
+ i--
+ q = n / 10
+ a[i] = uint8(n-q*10) + 0x30
+ n = q
+ }
+
+ i--
+ a[i] = uint8(n) + 0x30
+
+ return a[i:]
+}
+
+// treats string value as unsigned integer representation
+func stringToInt(b []byte) int {
+ val := 0
+ for i := range b {
+ val *= 10
+ val += int(b[i] - 0x30)
+ }
+ return val
+}
+
+// returns the string read as a bytes slice, whether the value is NULL,
+// the number of bytes read and an error, in case the string is longer than
+// the input slice
+func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
+ // Get length
+ num, isNull, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return b[n:n], isNull, n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return b[n-int(num) : n : n], false, n, nil
+ }
+ return nil, false, n, io.EOF
+}
+
+// returns the number of bytes skipped and an error, in case the string is
+// longer than the input slice
+func skipLengthEncodedString(b []byte) (int, error) {
+ // Get length
+ num, _, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return n, nil
+ }
+ return n, io.EOF
+}
+
+// returns the number read, whether the value is NULL and the number of bytes read
+func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
+ // See issue #349
+ if len(b) == 0 {
+ return 0, true, 1
+ }
+
+ switch b[0] {
+ // 251: NULL
+ case 0xfb:
+ return 0, true, 1
+
+ // 252: value of following 2
+ case 0xfc:
+ return uint64(b[1]) | uint64(b[2])<<8, false, 3
+
+ // 253: value of following 3
+ case 0xfd:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
+
+ // 254: value of following 8
+ case 0xfe:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
+ uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+ uint64(b[7])<<48 | uint64(b[8])<<56,
+ false, 9
+ }
+
+ // 0-250: value of first byte
+ return uint64(b[0]), false, 1
+}
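+
+// Illustrative examples (not part of the upstream driver) of the
+// length-encoded integer format decoded above:
+//
+//	[0x2a]           -> 42, not NULL, 1 byte read
+//	[0xfb]           -> 0, NULL, 1 byte read
+//	[0xfc 0x34 0x12] -> 0x1234 (4660), not NULL, 3 bytes read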
+
+// encodes a uint64 value and appends it to the given bytes slice
+func appendLengthEncodedInteger(b []byte, n uint64) []byte {
+ switch {
+ case n <= 250:
+ return append(b, byte(n))
+
+ case n <= 0xffff:
+ return append(b, 0xfc, byte(n), byte(n>>8))
+
+ case n <= 0xffffff:
+ return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
+ }
+ return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
+ byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+}
+
+// reserveBuffer checks cap(buf) and expands the buffer to len(buf) + appendSize.
+// If cap(buf) is not large enough, a new buffer is allocated.
+func reserveBuffer(buf []byte, appendSize int) []byte {
+ newSize := len(buf) + appendSize
+ if cap(buf) < newSize {
+ // Grow buffer exponentially
+ newBuf := make([]byte, len(buf)*2+appendSize)
+ copy(newBuf, buf)
+ buf = newBuf
+ }
+ return buf[:newSize]
+}
+
+// escapeBytesBackslash escapes []byte with backslashes (\)
+// This escapes the contents of a string (provided as []byte) by adding backslashes before special
+// characters, and turning others into specific escape sequences, such as
+// turning newlines into \n and null bytes into \0.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
+func escapeBytesBackslash(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ switch c {
+ case '\x00':
+ buf[pos+1] = '0'
+ buf[pos] = '\\'
+ pos += 2
+ case '\n':
+ buf[pos+1] = 'n'
+ buf[pos] = '\\'
+ pos += 2
+ case '\r':
+ buf[pos+1] = 'r'
+ buf[pos] = '\\'
+ pos += 2
+ case '\x1a':
+ buf[pos+1] = 'Z'
+ buf[pos] = '\\'
+ pos += 2
+ case '\'':
+ buf[pos+1] = '\''
+ buf[pos] = '\\'
+ pos += 2
+ case '"':
+ buf[pos+1] = '"'
+ buf[pos] = '\\'
+ pos += 2
+ case '\\':
+ buf[pos+1] = '\\'
+ buf[pos] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeStringBackslash is similar to escapeBytesBackslash but for string.
+func escapeStringBackslash(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ switch c {
+ case '\x00':
+ buf[pos+1] = '0'
+ buf[pos] = '\\'
+ pos += 2
+ case '\n':
+ buf[pos+1] = 'n'
+ buf[pos] = '\\'
+ pos += 2
+ case '\r':
+ buf[pos+1] = 'r'
+ buf[pos] = '\\'
+ pos += 2
+ case '\x1a':
+ buf[pos+1] = 'Z'
+ buf[pos] = '\\'
+ pos += 2
+ case '\'':
+ buf[pos+1] = '\''
+ buf[pos] = '\\'
+ pos += 2
+ case '"':
+ buf[pos+1] = '"'
+ buf[pos] = '\\'
+ pos += 2
+ case '\\':
+ buf[pos+1] = '\\'
+ buf[pos] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
+// This escapes the contents of a string by doubling up any apostrophes that
+// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
+// effect on the server.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
+func escapeBytesQuotes(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ if c == '\'' {
+ buf[pos+1] = '\''
+ buf[pos] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeStringQuotes is similar to escapeBytesQuotes but for string.
+func escapeStringQuotes(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ if c == '\'' {
+ buf[pos+1] = '\''
+ buf[pos] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
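+
+// Illustrative comparison (not part of the upstream driver):
+//
+//	escapeStringBackslash(nil, "it's") -> it\'s
+//	escapeStringQuotes(nil, "it's")    -> it''s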
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+
+// Unlock is a no-op used by -copylocks checker from `go vet`.
+// noCopy should implement sync.Locker from Go 1.11
+// https://github.com/golang/go/commit/c2eba53e7f80df21d51285879d51ab81bcfbf6bc
+// https://github.com/golang/go/issues/26165
+func (*noCopy) Unlock() {}
+
+// atomicError is a wrapper for atomically accessed error values
+type atomicError struct {
+ _ noCopy
+ value atomic.Value
+}
+
+// Set sets the error value regardless of the previous value.
+// The value must not be nil
+func (ae *atomicError) Set(value error) {
+ ae.value.Store(value)
+}
+
+// Value returns the current error value
+func (ae *atomicError) Value() error {
+ if v := ae.value.Load(); v != nil {
+ // this will panic if the value doesn't implement the error interface
+ return v.(error)
+ }
+ return nil
+}
+
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+ dargs := make([]driver.Value, len(named))
+ for n, param := range named {
+ if len(param.Name) > 0 {
+ // TODO: support the use of Named Parameters #561
+ return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+ }
+ dargs[n] = param.Value
+ }
+ return dargs, nil
+}
+
+func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
+ switch sql.IsolationLevel(level) {
+ case sql.LevelRepeatableRead:
+ return "REPEATABLE READ", nil
+ case sql.LevelReadCommitted:
+ return "READ COMMITTED", nil
+ case sql.LevelReadUncommitted:
+ return "READ UNCOMMITTED", nil
+ case sql.LevelSerializable:
+ return "SERIALIZABLE", nil
+ default:
+ return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
+ }
+}
diff --git a/vendor/github.com/google/go-querystring/LICENSE b/vendor/github.com/google/go-querystring/LICENSE
new file mode 100644
index 0000000..ae121a1
--- /dev/null
+++ b/vendor/github.com/google/go-querystring/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 Google. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go
new file mode 100644
index 0000000..91198f8
--- /dev/null
+++ b/vendor/github.com/google/go-querystring/query/encode.go
@@ -0,0 +1,357 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package query implements encoding of structs into URL query parameters.
+//
+// As a simple example:
+//
+// type Options struct {
+// Query string `url:"q"`
+// ShowAll bool `url:"all"`
+// Page int `url:"page"`
+// }
+//
+// opt := Options{ "foo", true, 2 }
+// v, _ := query.Values(opt)
+// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2"
+//
+// The exact mapping between Go values and url.Values is described in the
+// documentation for the Values() function.
+package query
+
+import (
+ "bytes"
+ "fmt"
+ "net/url"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var timeType = reflect.TypeOf(time.Time{})
+
+var encoderType = reflect.TypeOf(new(Encoder)).Elem()
+
+// Encoder is an interface implemented by any type that wishes to encode
+// itself into URL values in a non-standard way.
+type Encoder interface {
+ EncodeValues(key string, v *url.Values) error
+}
+
+// Values returns the url.Values encoding of v.
+//
+// Values expects to be passed a struct, and traverses it recursively using the
+// following encoding rules.
+//
+// Each exported struct field is encoded as a URL parameter unless
+//
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option
+//
+// The empty values are false, 0, any nil pointer or interface value, any array,
+// slice, map, or string of length zero, and any type (such as time.Time) that
+// returns true for IsZero().
+//
+// The URL parameter name defaults to the struct field name but can be
+// specified in the struct field's tag value. The "url" key in the struct
+// field's tag value is the key name, followed by an optional comma and
+// options. For example:
+//
+// // Field is ignored by this package.
+// Field int `url:"-"`
+//
+// // Field appears as URL parameter "myName".
+// Field int `url:"myName"`
+//
+// // Field appears as URL parameter "myName" and the field is omitted if
+// // its value is empty
+// Field int `url:"myName,omitempty"`
+//
+// // Field appears as URL parameter "Field" (the default), but the field
+// // is skipped if empty. Note the leading comma.
+// Field int `url:",omitempty"`
+//
+// For encoding individual field values, the following type-dependent rules
+// apply:
+//
+// Boolean values default to encoding as the strings "true" or "false".
+// Including the "int" option signals that the field should be encoded as the
+// strings "1" or "0".
+//
+// time.Time values default to encoding as RFC3339 timestamps. Including the
+// "unix" option signals that the field should be encoded as a Unix time (see
+// time.Unix()). The "unixmilli" and "unixnano" options will encode the number
+// of milliseconds and nanoseconds, respectively, since January 1, 1970 (see
+// time.UnixNano()). Including the "layout" struct tag (separate from the
+// "url" tag) will use the value of the "layout" tag as a layout passed to
+// time.Format. For example:
+//
+// // Encode a time.Time as YYYY-MM-DD
+// Field time.Time `layout:"2006-01-02"`
+//
+// Slice and Array values default to encoding as multiple URL values of the
+// same name. Including the "comma" option signals that the field should be
+// encoded as a single comma-delimited value. Including the "space" option
+// similarly encodes the value as a single space-delimited string. Including
+// the "semicolon" option will encode the value as a semicolon-delimited string.
+// Including the "brackets" option signals that the multiple URL values should
+// have "[]" appended to the value name. "numbered" will append a number to
+// the end of each incidence of the value name, example:
+// name0=value0&name1=value1, etc. Including the "del" struct tag (separate
+// from the "url" tag) will use the value of the "del" tag as the delimiter.
+// For example:
+//
+// // Encode a slice of bools as ints ("1" for true, "0" for false),
+// // separated by exclamation points "!".
+// Field []bool `url:",int" del:"!"`
+//
+// Anonymous struct fields are usually encoded as if their inner exported
+// fields were fields in the outer struct, subject to the standard Go
+// visibility rules. An anonymous struct field with a name given in its URL
+// tag is treated as having that name, rather than being anonymous.
+//
+// Non-nil pointer values are encoded as the value pointed to.
+//
+// Nested structs are encoded including parent fields in value names for
+// scoping. e.g:
+//
+// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO"
+//
+// All other values are encoded using their default string representation.
+//
+// Multiple fields that encode to the same URL parameter name will be included
+// as multiple URL values of the same name.
+func Values(v interface{}) (url.Values, error) {
+ values := make(url.Values)
+ val := reflect.ValueOf(v)
+ for val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ return values, nil
+ }
+ val = val.Elem()
+ }
+
+ if v == nil {
+ return values, nil
+ }
+
+ if val.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind())
+ }
+
+ err := reflectValue(values, val, "")
+ return values, err
+}
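+
+// Illustrative example (not part of the upstream package): given
+//
+//	type Filter struct {
+//		IDs  []int  `url:"ids,comma"`
+//		Name string `url:"name,omitempty"`
+//	}
+//
+// Values(Filter{IDs: []int{1, 2, 3}}) returns url.Values{"ids": {"1,2,3"}};
+// Name is omitted because it is empty, and Encode() yields "ids=1%2C2%2C3".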
+
+// reflectValue populates the values parameter from the struct fields in val.
+// Embedded structs are followed recursively (using the rules defined in the
+// Values function documentation) breadth-first.
+func reflectValue(values url.Values, val reflect.Value, scope string) error {
+ var embedded []reflect.Value
+
+ typ := val.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ sf := typ.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+
+ sv := val.Field(i)
+ tag := sf.Tag.Get("url")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+
+ if name == "" {
+ if sf.Anonymous {
+ v := reflect.Indirect(sv)
+ if v.IsValid() && v.Kind() == reflect.Struct {
+ // save embedded struct for later processing
+ embedded = append(embedded, v)
+ continue
+ }
+ }
+
+ name = sf.Name
+ }
+
+ if scope != "" {
+ name = scope + "[" + name + "]"
+ }
+
+ if opts.Contains("omitempty") && isEmptyValue(sv) {
+ continue
+ }
+
+ if sv.Type().Implements(encoderType) {
+ // if sv is a nil pointer and the custom encoder is defined on a non-pointer
+ // method receiver, set sv to the zero value of the underlying type
+ if !reflect.Indirect(sv).IsValid() && sv.Type().Elem().Implements(encoderType) {
+ sv = reflect.New(sv.Type().Elem())
+ }
+
+ m := sv.Interface().(Encoder)
+ if err := m.EncodeValues(name, &values); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // recursively dereference pointers. break on nil pointers
+ for sv.Kind() == reflect.Ptr {
+ if sv.IsNil() {
+ break
+ }
+ sv = sv.Elem()
+ }
+
+ if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array {
+ var del string
+ if opts.Contains("comma") {
+ del = ","
+ } else if opts.Contains("space") {
+ del = " "
+ } else if opts.Contains("semicolon") {
+ del = ";"
+ } else if opts.Contains("brackets") {
+ name = name + "[]"
+ } else {
+ del = sf.Tag.Get("del")
+ }
+
+ if del != "" {
+ s := new(bytes.Buffer)
+ first := true
+ for i := 0; i < sv.Len(); i++ {
+ if first {
+ first = false
+ } else {
+ s.WriteString(del)
+ }
+ s.WriteString(valueString(sv.Index(i), opts, sf))
+ }
+ values.Add(name, s.String())
+ } else {
+ for i := 0; i < sv.Len(); i++ {
+ k := name
+ if opts.Contains("numbered") {
+ k = fmt.Sprintf("%s%d", name, i)
+ }
+ values.Add(k, valueString(sv.Index(i), opts, sf))
+ }
+ }
+ continue
+ }
+
+ if sv.Type() == timeType {
+ values.Add(name, valueString(sv, opts, sf))
+ continue
+ }
+
+ if sv.Kind() == reflect.Struct {
+ if err := reflectValue(values, sv, name); err != nil {
+ return err
+ }
+ continue
+ }
+
+ values.Add(name, valueString(sv, opts, sf))
+ }
+
+ for _, f := range embedded {
+ if err := reflectValue(values, f, scope); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// valueString returns the string representation of a value.
+func valueString(v reflect.Value, opts tagOptions, sf reflect.StructField) string {
+ for v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return ""
+ }
+ v = v.Elem()
+ }
+
+ if v.Kind() == reflect.Bool && opts.Contains("int") {
+ if v.Bool() {
+ return "1"
+ }
+ return "0"
+ }
+
+ if v.Type() == timeType {
+ t := v.Interface().(time.Time)
+ if opts.Contains("unix") {
+ return strconv.FormatInt(t.Unix(), 10)
+ }
+ if opts.Contains("unixmilli") {
+ return strconv.FormatInt((t.UnixNano() / 1e6), 10)
+ }
+ if opts.Contains("unixnano") {
+ return strconv.FormatInt(t.UnixNano(), 10)
+ }
+ if layout := sf.Tag.Get("layout"); layout != "" {
+ return t.Format(layout)
+ }
+ return t.Format(time.RFC3339)
+ }
+
+ return fmt.Sprint(v.Interface())
+}
+
+// isEmptyValue checks if a value should be considered empty for the purposes
+// of omitting fields with the "omitempty" option.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+
+ type zeroable interface {
+ IsZero() bool
+ }
+
+ if z, ok := v.Interface().(zeroable); ok {
+ return z.IsZero()
+ }
+
+ return false
+}
+
+// tagOptions is the string following a comma in a struct field's "url" tag, or
+// the empty string. It does not include the leading comma.
+type tagOptions []string
+
+// parseTag splits a struct field's url tag into its name and comma-separated
+// options.
+func parseTag(tag string) (string, tagOptions) {
+ s := strings.Split(tag, ",")
+ return s[0], s[1:]
+}
+
+// Contains checks whether the tagOptions contains the specified option.
+func (o tagOptions) Contains(option string) bool {
+ for _, s := range o {
+ if s == option {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS
new file mode 100644
index 0000000..b722392
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/AUTHORS
@@ -0,0 +1,8 @@
+# This is the official list of gorilla/mux authors for copyright purposes.
+#
+# Please keep the list sorted.
+
+Google LLC (https://opensource.google.com/)
+Kamil Kisielk
+Matt Silverlock
+Rodrigo Moraes (https://github.com/moraes)
diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE
new file mode 100644
index 0000000..6903df6
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
new file mode 100644
index 0000000..35eea9f
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -0,0 +1,805 @@
+# gorilla/mux
+
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux)
+[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
+
+![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png)
+
+https://www.gorillatoolkit.org/pkg/mux
+
+Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
+their respective handler.
+
+The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
+
+* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
+* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
+* URL hosts, paths and query values can have variables with an optional regular expression.
+* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
+* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
+
+---
+
+* [Install](#install)
+* [Examples](#examples)
+* [Matching Routes](#matching-routes)
+* [Static Files](#static-files)
+* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.)
+* [Registered URLs](#registered-urls)
+* [Walking Routes](#walking-routes)
+* [Graceful Shutdown](#graceful-shutdown)
+* [Middleware](#middleware)
+* [Handling CORS Requests](#handling-cors-requests)
+* [Testing Handlers](#testing-handlers)
+* [Full Example](#full-example)
+
+---
+
+## Install
+
+With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
+
+```sh
+go get -u github.com/gorilla/mux
+```
+
+## Examples
+
+Let's start registering a couple of URL paths and handlers:
+
+```go
+func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", HomeHandler)
+ r.HandleFunc("/products", ProductsHandler)
+ r.HandleFunc("/articles", ArticlesHandler)
+ http.Handle("/", r)
+}
+```
+
+Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
+
+Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/products/{key}", ProductHandler)
+r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
+
+```go
+func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
+ vars := mux.Vars(r)
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
+```
+
+And this is all you need to know about the basic usage. More advanced options are explained below.
+
+### Matching Routes
+
+Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
+
+```go
+r := mux.NewRouter()
+// Only matches if domain is "www.example.com".
+r.Host("www.example.com")
+// Matches a dynamic subdomain.
+r.Host("{subdomain:[a-z]+}.example.com")
+```
+
+There are several other matchers that can be added. To match path prefixes:
+
+```go
+r.PathPrefix("/products/")
+```
+
+...or HTTP methods:
+
+```go
+r.Methods("GET", "POST")
+```
+
+...or URL schemes:
+
+```go
+r.Schemes("https")
+```
+
+...or header values:
+
+```go
+r.Headers("X-Requested-With", "XMLHttpRequest")
+```
+
+...or query values:
+
+```go
+r.Queries("key", "value")
+```
+
+...or to use a custom matcher function:
+
+```go
+r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+ return r.ProtoMajor == 0
+})
+```
+
+...and finally, it is possible to combine several matchers in a single route:
+
+```go
+r.HandleFunc("/products", ProductsHandler).
+ Host("www.example.com").
+ Methods("GET").
+ Schemes("http")
+```
+
+Routes are tested in the order they were added to the router. If two routes match, the first one wins:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/specific", specificHandler)
+r.PathPrefix("/").Handler(catchAllHandler)
+```
+
+Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
+
+```go
+r := mux.NewRouter()
+s := r.Host("www.example.com").Subrouter()
+```
+
+Then register routes in the subrouter:
+
+```go
+s.HandleFunc("/products/", ProductsHandler)
+s.HandleFunc("/products/{key}", ProductHandler)
+s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then different parts of the app can register their paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
+
+```go
+r := mux.NewRouter()
+s := r.PathPrefix("/products").Subrouter()
+// "/products/"
+s.HandleFunc("/", ProductsHandler)
+// "/products/{key}/"
+s.HandleFunc("/{key}/", ProductHandler)
+// "/products/{key}/details"
+s.HandleFunc("/{key}/details", ProductDetailsHandler)
+```
+
+
+### Static Files
+
+Note that the path provided to `PathPrefix()` represents a "wildcard": calling
+`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
+request that matches "/static/\*". This makes it easy to serve static files with mux:
+
+```go
+func main() {
+ var dir string
+
+ flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+ flag.Parse()
+ r := mux.NewRouter()
+
+ // This will serve files under http://localhost:8000/static/
+ r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+ srv := &http.Server{
+ Handler: r,
+ Addr: "127.0.0.1:8000",
+ // Good practice: enforce timeouts for servers you create!
+ WriteTimeout: 15 * time.Second,
+ ReadTimeout: 15 * time.Second,
+ }
+
+ log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Serving Single Page Applications
+
+Most of the time it makes sense to serve your SPA on a separate web server from your API,
+but sometimes it's desirable to serve them both from one place. It's possible to write a simple
+handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage
+mux's powerful routing for your API endpoints.
+
+```go
+package main
+
+import (
+ "encoding/json"
+ "log"
+ "net/http"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/gorilla/mux"
+)
+
+// spaHandler implements the http.Handler interface, so we can use it
+// to respond to HTTP requests. The path to the static directory and
+// path to the index file within that static directory are used to
+// serve the SPA in the given static directory.
+type spaHandler struct {
+ staticPath string
+ indexPath string
+}
+
+// ServeHTTP inspects the URL path to locate a file within the static dir
+// on the SPA handler. If a file is found, it will be served. If not, the
+// file located at the index path on the SPA handler will be served. This
+// is suitable behavior for serving an SPA (single page application).
+func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ // get the absolute path to prevent directory traversal
+ path, err := filepath.Abs(r.URL.Path)
+ if err != nil {
+ // if we failed to get the absolute path respond with a 400 bad request
+ // and stop
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ // prepend the path with the path to the static directory
+ path = filepath.Join(h.staticPath, path)
+
+ // check whether a file exists at the given path
+ _, err = os.Stat(path)
+ if os.IsNotExist(err) {
+ // file does not exist, serve index.html
+ http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
+ return
+ } else if err != nil {
+ // if we got an error (that wasn't that the file doesn't exist) stating the
+ // file, return a 500 internal server error and stop
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // otherwise, use http.FileServer to serve the static dir
+ http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r)
+}
+
+func main() {
+ router := mux.NewRouter()
+
+ router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) {
+ // an example API handler
+ json.NewEncoder(w).Encode(map[string]bool{"ok": true})
+ })
+
+ spa := spaHandler{staticPath: "build", indexPath: "index.html"}
+ router.PathPrefix("/").Handler(spa)
+
+ srv := &http.Server{
+ Handler: router,
+ Addr: "127.0.0.1:8000",
+ // Good practice: enforce timeouts for servers you create!
+ WriteTimeout: 15 * time.Second,
+ ReadTimeout: 15 * time.Second,
+ }
+
+ log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Registered URLs
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+ Name("article")
+```
+
+To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
+
+```go
+url, err := r.Get("article").URL("category", "technology", "id", "42")
+```
+
+...and the result will be a `url.URL` with the following path:
+
+```
+"/articles/technology/42"
+```
+
+This also works for host and query value variables:
+
+```go
+r := mux.NewRouter()
+r.Host("{subdomain}.example.com").
+ Path("/articles/{category}/{id:[0-9]+}").
+ Queries("filter", "{filter}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla"
+url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42",
+ "filter", "gorilla")
+```
+
+All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
+
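+A minimal sketch (not from the original README, and assuming the package's `BuildOnly()` route option) of how such a build-only route might be defined:
+
+```go
+r := mux.NewRouter()
+// This route is never matched against incoming requests; it exists only
+// so that URLs can be built from it by name.
+r.Path("/download/{file}").BuildOnly().Name("download")
+
+// "/download/report.pdf"
+u, err := r.Get("download").URL("file", "report.pdf")
+```
+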
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+```go
+r.HeadersRegexp("Content-Type", "application/(text|json)")
+```
+
+...and the route will match requests with a Content-Type of either `application/json` or `application/text`.
+
+There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
+
+```go
+// "http://news.example.com/"
+host, err := r.Get("article").URLHost("subdomain", "news")
+
+// "/articles/technology/42"
+path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+```
+
+And if you use subrouters, host and path defined separately can be built as well:
+
+```go
+r := mux.NewRouter()
+s := r.Host("{subdomain}.example.com").Subrouter()
+s.Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+// "http://news.example.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+```
+
+### Walking Routes
+
+The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
+the following prints all of the registered routes:
+
+```go
+package main
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ return
+}
+
+func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+ r.HandleFunc("/products", handler).Methods("POST")
+ r.HandleFunc("/articles", handler).Methods("GET")
+ r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
+ r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
+ err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+ pathTemplate, err := route.GetPathTemplate()
+ if err == nil {
+ fmt.Println("ROUTE:", pathTemplate)
+ }
+ pathRegexp, err := route.GetPathRegexp()
+ if err == nil {
+ fmt.Println("Path regexp:", pathRegexp)
+ }
+ queriesTemplates, err := route.GetQueriesTemplates()
+ if err == nil {
+ fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
+ }
+ queriesRegexps, err := route.GetQueriesRegexp()
+ if err == nil {
+ fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
+ }
+ methods, err := route.GetMethods()
+ if err == nil {
+ fmt.Println("Methods:", strings.Join(methods, ","))
+ }
+ fmt.Println()
+ return nil
+ })
+
+ if err != nil {
+ fmt.Println(err)
+ }
+
+ http.Handle("/", r)
+}
+```
+
+### Graceful Shutdown
+
+Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
+
+```go
+package main
+
+import (
+ "context"
+ "flag"
+ "log"
+ "net/http"
+ "os"
+ "os/signal"
+ "time"
+
+ "github.com/gorilla/mux"
+)
+
+func main() {
+ var wait time.Duration
+	flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully waits for existing connections to finish - e.g. 15s or 1m")
+ flag.Parse()
+
+ r := mux.NewRouter()
+ // Add your routes as needed
+
+ srv := &http.Server{
+ Addr: "0.0.0.0:8080",
+ // Good practice to set timeouts to avoid Slowloris attacks.
+ WriteTimeout: time.Second * 15,
+ ReadTimeout: time.Second * 15,
+ IdleTimeout: time.Second * 60,
+ Handler: r, // Pass our instance of gorilla/mux in.
+ }
+
+ // Run our server in a goroutine so that it doesn't block.
+ go func() {
+ if err := srv.ListenAndServe(); err != nil {
+ log.Println(err)
+ }
+ }()
+
+ c := make(chan os.Signal, 1)
+ // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
+ // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
+ signal.Notify(c, os.Interrupt)
+
+ // Block until we receive our signal.
+ <-c
+
+ // Create a deadline to wait for.
+ ctx, cancel := context.WithTimeout(context.Background(), wait)
+ defer cancel()
+ // Doesn't block if no connections, but will otherwise wait
+ // until the timeout deadline.
+ srv.Shutdown(ctx)
+ // Optionally, you could run srv.Shutdown in a goroutine and block on
+ // <-ctx.Done() if your application should wait for other services
+ // to finalize based on context cancellation.
+ log.Println("shutting down")
+ os.Exit(0)
+}
+```
+
+### Middleware
+
+Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found. This includes the router's subrouters.
+Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
+
+Mux middlewares are defined using the de facto standard type:
+
+```go
+type MiddlewareFunc func(http.Handler) http.Handler
+```
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as a parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers.
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+```go
+func loggingMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Do stuff here
+ log.Println(r.RequestURI)
+ // Call the next handler, which can be another middleware in the chain, or the final handler.
+ next.ServeHTTP(w, r)
+ })
+}
+```
+
+Middlewares can be added to a router using `Router.Use()`:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+r.Use(loggingMiddleware)
+```
+
+A more complex authentication middleware, which maps session token to users, could be written as:
+
+```go
+// Define our struct
+type authenticationMiddleware struct {
+ tokenUsers map[string]string
+}
+
+// Initialize it somewhere
+func (amw *authenticationMiddleware) Populate() {
+ amw.tokenUsers["00000000"] = "user0"
+ amw.tokenUsers["aaaaaaaa"] = "userA"
+ amw.tokenUsers["05f717e5"] = "randomUser"
+ amw.tokenUsers["deadbeef"] = "user0"
+}
+
+// Middleware function, which will be called for each request
+func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ token := r.Header.Get("X-Session-Token")
+
+ if user, found := amw.tokenUsers[token]; found {
+ // We found the token in our map
+ log.Printf("Authenticated user %s\n", user)
+ // Pass down the request to the next middleware (or final handler)
+ next.ServeHTTP(w, r)
+ } else {
+ // Write an error and stop the handler chain
+ http.Error(w, "Forbidden", http.StatusForbidden)
+ }
+ })
+}
+```
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+
+amw := authenticationMiddleware{}
+amw.Populate()
+
+r.Use(amw.Middleware)
+```
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
+
+### Handling CORS Requests
+
+[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header.
+
+* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin`
+* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route
+* If you do not specify any methods, then:
+> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers.
+
+Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers:
+
+```go
+package main
+
+import (
+ "net/http"
+ "github.com/gorilla/mux"
+)
+
+func main() {
+ r := mux.NewRouter()
+
+ // IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers
+ r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions)
+ r.Use(mux.CORSMethodMiddleware(r))
+
+ http.ListenAndServe(":8080", r)
+}
+
+func fooHandler(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ if r.Method == http.MethodOptions {
+ return
+ }
+
+ w.Write([]byte("foo"))
+}
+```
+
+And a request to `/foo` using something like:
+
+```bash
+curl localhost:8080/foo -v
+```
+
+Would look like:
+
+```bash
+* Trying ::1...
+* TCP_NODELAY set
+* Connected to localhost (::1) port 8080 (#0)
+> GET /foo HTTP/1.1
+> Host: localhost:8080
+> User-Agent: curl/7.59.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS
+< Access-Control-Allow-Origin: *
+< Date: Fri, 28 Jun 2019 20:13:30 GMT
+< Content-Length: 3
+< Content-Type: text/plain; charset=utf-8
+<
+* Connection #0 to host localhost left intact
+foo
+```
+
+### Testing Handlers
+
+Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
+
+First, our simple HTTP handler:
+
+```go
+// endpoints.go
+package main
+
+func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
+ // A very simple health check.
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+
+ // In the future we could report back on the status of our DB, or our cache
+ // (e.g. Redis) by performing a simple PING, and include them in the response.
+ io.WriteString(w, `{"alive": true}`)
+}
+
+func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/health", HealthCheckHandler)
+
+ log.Fatal(http.ListenAndServe("localhost:8080", r))
+}
+```
+
+Our test code:
+
+```go
+// endpoints_test.go
+package main
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func TestHealthCheckHandler(t *testing.T) {
+	// Create a request to pass to our handler. We don't need a request body for this
+	// test, so we pass 'nil' as the third parameter.
+ req, err := http.NewRequest("GET", "/health", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
+ rr := httptest.NewRecorder()
+ handler := http.HandlerFunc(HealthCheckHandler)
+
+ // Our handlers satisfy http.Handler, so we can call their ServeHTTP method
+ // directly and pass in our Request and ResponseRecorder.
+ handler.ServeHTTP(rr, req)
+
+ // Check the status code is what we expect.
+ if status := rr.Code; status != http.StatusOK {
+ t.Errorf("handler returned wrong status code: got %v want %v",
+ status, http.StatusOK)
+ }
+
+ // Check the response body is what we expect.
+ expected := `{"alive": true}`
+ if rr.Body.String() != expected {
+ t.Errorf("handler returned unexpected body: got %v want %v",
+ rr.Body.String(), expected)
+ }
+}
+```
+
+In the case that our routes have [variables](#examples), we can pass those in the request. We could write
+[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple
+possible route variables as needed.
+
+```go
+// endpoints.go
+func main() {
+ r := mux.NewRouter()
+ // A route with a route variable:
+ r.HandleFunc("/metrics/{type}", MetricsHandler)
+
+ log.Fatal(http.ListenAndServe("localhost:8080", r))
+}
+```
+
+Our test file, with a table-driven test over the `routeVariable` values:
+
+```go
+// endpoints_test.go
+func TestMetricsHandler(t *testing.T) {
+ tt := []struct{
+ routeVariable string
+ shouldPass bool
+ }{
+ {"goroutines", true},
+ {"heap", true},
+ {"counters", true},
+ {"queries", true},
+ {"adhadaeqm3k", false},
+ }
+
+ for _, tc := range tt {
+ path := fmt.Sprintf("/metrics/%s", tc.routeVariable)
+ req, err := http.NewRequest("GET", path, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rr := httptest.NewRecorder()
+
+ // Need to create a router that we can pass the request through so that the vars will be added to the context
+ router := mux.NewRouter()
+ router.HandleFunc("/metrics/{type}", MetricsHandler)
+ router.ServeHTTP(rr, req)
+
+ // In this case, our MetricsHandler returns a non-200 response
+ // for a route variable it doesn't know about.
+ if rr.Code == http.StatusOK && !tc.shouldPass {
+ t.Errorf("handler should have failed on routeVariable %s: got %v want %v",
+ tc.routeVariable, rr.Code, http.StatusOK)
+ }
+ }
+}
+```
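+
+If you would rather unit test a handler in isolation, without routing the request through a `mux.Router`, the `mux.SetURLVars` test helper can inject route variables directly into the request. A minimal sketch reusing `MetricsHandler` (and assuming, as in the table above, that it returns 200 for "goroutines"):
+
+```go
+func TestMetricsHandlerUnit(t *testing.T) {
+	req, err := http.NewRequest("GET", "/metrics/goroutines", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Inject the {type} route variable directly; no router needed.
+	req = mux.SetURLVars(req, map[string]string{"type": "goroutines"})
+
+	rr := httptest.NewRecorder()
+	http.HandlerFunc(MetricsHandler).ServeHTTP(rr, req)
+
+	if rr.Code != http.StatusOK {
+		t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
+	}
+}
+```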
+
+## Full Example
+
+Here's a complete, runnable example of a small `mux` based server:
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+
+	"github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+ r := mux.NewRouter()
+ // Routes consist of a path and a handler function.
+ r.HandleFunc("/", YourHandler)
+
+ // Bind to a port and pass our router in
+ log.Fatal(http.ListenAndServe(":8000", r))
+}
+```
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go
new file mode 100644
index 0000000..bd5a38b
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/doc.go
@@ -0,0 +1,306 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+ * Requests can be matched based on URL host, path, path prefix, schemes,
+ header and query values, HTTP methods or using custom matchers.
+ * URL hosts, paths and query values can have variables with an optional
+ regular expression.
+ * Registered URLs can be built, or "reversed", which helps maintaining
+ references to resources.
+ * Routes can be used as subrouters: nested routes are only tested if the
+ parent route matches. This is useful to define groups of routes that
+ share common conditions like a host, a path prefix or other repeated
+ attributes. As a bonus, this optimizes request matching.
+ * It implements the http.Handler interface so it is compatible with the
+ standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+ func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", HomeHandler)
+ r.HandleFunc("/products", ProductsHandler)
+ r.HandleFunc("/articles", ArticlesHandler)
+ http.Handle("/", r)
+ }
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/products/{key}", ProductHandler)
+ r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
+
+ r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+ vars := mux.Vars(request)
+ category := vars["category"]
+
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+ r := mux.NewRouter()
+ // Only matches if domain is "www.example.com".
+ r.Host("www.example.com")
+ // Matches a dynamic subdomain.
+ r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+ r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+ r.Methods("GET", "POST")
+
+...or URL schemes:
+
+ r.Schemes("https")
+
+...or header values:
+
+ r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+ r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+ r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+ return r.ProtoMajor == 0
+ })
+
+...and finally, it is possible to combine several matchers in a single route:
+
+ r.HandleFunc("/products", ProductsHandler).
+ Host("www.example.com").
+ Methods("GET").
+ Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+ r := mux.NewRouter()
+ s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place, and then different parts of the app can
+register their paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as base for their paths:
+
+ r := mux.NewRouter()
+ s := r.PathPrefix("/products").Subrouter()
+ // "/products/"
+ s.HandleFunc("/", ProductsHandler)
+ // "/products/{key}/"
+ s.HandleFunc("/{key}/", ProductHandler)
+ // "/products/{key}/details"
+ s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+ func main() {
+ var dir string
+
+ flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+ flag.Parse()
+ r := mux.NewRouter()
+
+ // This will serve files under http://localhost:8000/static/
+ r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+ srv := &http.Server{
+ Handler: r,
+ Addr: "127.0.0.1:8000",
+ // Good practice: enforce timeouts for servers you create!
+ WriteTimeout: 15 * time.Second,
+ ReadTimeout: 15 * time.Second,
+ }
+
+ log.Fatal(srv.ListenAndServe())
+ }
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+ Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+ url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+ "/articles/technology/42"
+
+This also works for host and query value variables:
+
+ r := mux.NewRouter()
+ r.Host("{subdomain}.domain.com").
+ Path("/articles/{category}/{id:[0-9]+}").
+ Queries("filter", "{filter}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42",
+ "filter", "gorilla")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
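+
+For example, a sketch of one way to declare such a build-only route (the
+path and name here are made up purely for illustration):
+
+	r := mux.NewRouter()
+	// Hypothetical build-only route: it never matches a request, but it
+	// can still be used to build URLs.
+	r.Path("/reports/{id:[0-9]+}").Name("report").BuildOnly()
+	// "/reports/42"
+	u, err := r.Get("report").URL("id", "42")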
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+ r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of either `application/json` or
+`application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+ // "http://news.domain.com/"
+ host, err := r.Get("article").URLHost("subdomain", "news")
+
+ // "/articles/technology/42"
+ path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+ r := mux.NewRouter()
+ s := r.Host("{subdomain}.domain.com").Subrouter()
+ s.Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // "http://news.domain.com/articles/technology/42"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+
+Mux supports adding middlewares to a Router, which are executed in the order they are added when a match is found; this includes matches on the Router's subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking.
+
+ type MiddlewareFunc func(http.Handler) http.Handler
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+ func simpleMw(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Do stuff here
+ log.Println(r.RequestURI)
+ // Call the next handler, which can be another middleware in the chain, or the final handler.
+ next.ServeHTTP(w, r)
+ })
+ }
+
+Middlewares can be added to a router using `Router.Use()`:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+ r.Use(simpleMw)
+
+A more complex authentication middleware, which maps session token to users, could be written as:
+
+ // Define our struct
+ type authenticationMiddleware struct {
+ tokenUsers map[string]string
+ }
+
+ // Initialize it somewhere
+ func (amw *authenticationMiddleware) Populate() {
+ amw.tokenUsers["00000000"] = "user0"
+ amw.tokenUsers["aaaaaaaa"] = "userA"
+ amw.tokenUsers["05f717e5"] = "randomUser"
+ amw.tokenUsers["deadbeef"] = "user0"
+ }
+
+ // Middleware function, which will be called for each request
+ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ token := r.Header.Get("X-Session-Token")
+
+ if user, found := amw.tokenUsers[token]; found {
+ // We found the token in our map
+ log.Printf("Authenticated user %s\n", user)
+ next.ServeHTTP(w, r)
+ } else {
+ http.Error(w, "Forbidden", http.StatusForbidden)
+ }
+ })
+ }
+
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+
+ amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
+ amw.Populate()
+
+ r.Use(amw.Middleware)
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
+
+*/
+package mux
diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go
new file mode 100644
index 0000000..cb51c56
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/middleware.go
@@ -0,0 +1,74 @@
+package mux
+
+import (
+ "net/http"
+ "strings"
+)
+
+// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
+// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
+// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
+type MiddlewareFunc func(http.Handler) http.Handler
+
+// middleware interface is anything which implements a MiddlewareFunc named Middleware.
+type middleware interface {
+ Middleware(handler http.Handler) http.Handler
+}
+
+// Middleware allows MiddlewareFunc to implement the middleware interface.
+func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
+ return mw(handler)
+}
+
+// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) Use(mwf ...MiddlewareFunc) {
+ for _, fn := range mwf {
+ r.middlewares = append(r.middlewares, fn)
+ }
+}
+
+// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) useInterface(mw middleware) {
+ r.middlewares = append(r.middlewares, mw)
+}
+
+// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header
+// on requests for routes that have an OPTIONS method matcher to all the method matchers on
+// the route. Routes that do not explicitly handle OPTIONS requests will not be processed
+// by the middleware. See examples for usage.
+func CORSMethodMiddleware(r *Router) MiddlewareFunc {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ allMethods, err := getAllMethodsForRoute(r, req)
+ if err == nil {
+ for _, v := range allMethods {
+ if v == http.MethodOptions {
+ w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ","))
+ }
+ }
+ }
+
+ next.ServeHTTP(w, req)
+ })
+ }
+}
+
+// getAllMethodsForRoute returns all the methods from method matchers matching a given
+// request.
+func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) {
+ var allMethods []string
+
+ for _, route := range r.routes {
+ var match RouteMatch
+ if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch {
+ methods, err := route.GetMethods()
+ if err != nil {
+ return nil, err
+ }
+
+ allMethods = append(allMethods, methods...)
+ }
+ }
+
+ return allMethods, nil
+}
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 0000000..782a34b
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,606 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "path"
+ "regexp"
+)
+
+var (
+ // ErrMethodMismatch is returned when the method in the request does not match
+ // the method defined against the route.
+ ErrMethodMismatch = errors.New("method is not allowed")
+ // ErrNotFound is returned when no route match is found.
+ ErrNotFound = errors.New("no matching route was found")
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+ return &Router{namedRoutes: make(map[string]*Route)}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+// var router = mux.NewRouter()
+//
+// func main() {
+// http.Handle("/", router)
+// }
+//
+// Or, for Google App Engine, register it in an init() function:
+//
+// func init() {
+// http.Handle("/", router)
+// }
+//
+// This will send all incoming requests to the router.
+type Router struct {
+ // Configurable Handler to be used when no route matches.
+ NotFoundHandler http.Handler
+
+ // Configurable Handler to be used when the request method does not match the route.
+ MethodNotAllowedHandler http.Handler
+
+ // Routes to be matched, in order.
+ routes []*Route
+
+ // Routes by name for URL building.
+ namedRoutes map[string]*Route
+
+ // If true, do not clear the request context after handling the request.
+ //
+ // Deprecated: No effect, since the context is stored on the request itself.
+ KeepContext bool
+
+ // Slice of middlewares to be called after a match is found
+ middlewares []middleware
+
+ // configuration shared with `Route`
+ routeConf
+}
+
+// common route configuration shared between `Router` and `Route`
+type routeConf struct {
+ // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+ useEncodedPath bool
+
+ // If true, when the path pattern is "/path/", accessing "/path" will
+ // redirect to the former and vice versa.
+ strictSlash bool
+
+ // If true, when the path pattern is "/path//to", accessing "/path//to"
+ // will not redirect
+ skipClean bool
+
+ // Manager for the variables from host and path.
+ regexp routeRegexpGroup
+
+ // List of matchers.
+ matchers []matcher
+
+ // The scheme used when building URLs.
+ buildScheme string
+
+ buildVarsFunc BuildVarsFunc
+}
+
+// returns an effective deep copy of `routeConf`
+func copyRouteConf(r routeConf) routeConf {
+ c := r
+
+ if r.regexp.path != nil {
+ c.regexp.path = copyRouteRegexp(r.regexp.path)
+ }
+
+ if r.regexp.host != nil {
+ c.regexp.host = copyRouteRegexp(r.regexp.host)
+ }
+
+ c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
+ for _, q := range r.regexp.queries {
+ c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
+ }
+
+ c.matchers = make([]matcher, len(r.matchers))
+ copy(c.matchers, r.matchers)
+
+ return c
+}
+
+func copyRouteRegexp(r *routeRegexp) *routeRegexp {
+ c := *r
+ return &c
+}
+
+// Match attempts to match the given request against the router's registered routes.
+//
+// If the request matches a route of this router or one of its subrouters the Route,
+// Handler, and Vars fields of the match argument are filled and this function
+// returns true.
+//
+// If the request does not match any of this router's or its subrouters' routes
+// then this function returns false. If available, a reason for the match failure
+// will be filled in the match argument's MatchErr field. If the match failure type
+// (eg: not found) has a registered handler, the handler is assigned to the Handler
+// field of the match argument.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+ for _, route := range r.routes {
+ if route.Match(req, match) {
+ // Build middleware chain if no error was found
+ if match.MatchErr == nil {
+ for i := len(r.middlewares) - 1; i >= 0; i-- {
+ match.Handler = r.middlewares[i].Middleware(match.Handler)
+ }
+ }
+ return true
+ }
+ }
+
+ if match.MatchErr == ErrMethodMismatch {
+ if r.MethodNotAllowedHandler != nil {
+ match.Handler = r.MethodNotAllowedHandler
+ return true
+ }
+
+ return false
+ }
+
+ // Closest match for a router (includes sub-routers)
+ if r.NotFoundHandler != nil {
+ match.Handler = r.NotFoundHandler
+ match.MatchErr = ErrNotFound
+ return true
+ }
+
+ match.MatchErr = ErrNotFound
+ return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if !r.skipClean {
+ path := req.URL.Path
+ if r.useEncodedPath {
+ path = req.URL.EscapedPath()
+ }
+ // Clean path to canonical form and redirect.
+ if p := cleanPath(path); p != path {
+
+ // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
+ // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
+ // http://code.google.com/p/go/issues/detail?id=5252
+ url := *req.URL
+ url.Path = p
+ p = url.String()
+
+ w.Header().Set("Location", p)
+ w.WriteHeader(http.StatusMovedPermanently)
+ return
+ }
+ }
+ var match RouteMatch
+ var handler http.Handler
+ if r.Match(req, &match) {
+ handler = match.Handler
+ req = requestWithVars(req, match.Vars)
+ req = requestWithRoute(req, match.Route)
+ }
+
+ if handler == nil && match.MatchErr == ErrMethodMismatch {
+ handler = methodNotAllowedHandler()
+ }
+
+ if handler == nil {
+ handler = http.NotFoundHandler()
+ }
+
+ handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+ return r.namedRoutes[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+ return r.namedRoutes[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will perform a redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// The redirect is an HTTP 301 (Moved Permanently). Note that when this is set for
+// routes with a non-idempotent method (e.g. POST, PUT), the subsequent redirected
+// request will be made as a GET by most clients. Use middleware or client settings
+// to modify this behaviour as needed.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
+func (r *Router) StrictSlash(value bool) *Router {
+ r.strictSlash = value
+ return r
+}
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+ r.skipClean = value
+ return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+//
+// If not called, the router will match the unencoded path to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to".
+func (r *Router) UseEncodedPath() *Router {
+ r.useEncodedPath = true
+ return r
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+ // initialize a route with a copy of the parent router's configuration
+ route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
+ r.routes = append(r.routes, route)
+ return route
+}
+
+// Name registers a new route with a name.
+// See Route.Name().
+func (r *Router) Name(name string) *Route {
+ return r.NewRoute().Name(name)
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+ return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+ *http.Request)) *Route {
+ return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+ return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+ return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+ return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+ return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+ return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+ return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+ return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+ return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+ return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
+func (r *Router) Walk(walkFn WalkFunc) error {
+ return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that walk is about to descend down to should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation, it is given the current route, and the current router,
+// and a list of ancestor routes that lead to the current route.
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+ for _, t := range r.routes {
+ err := walkFn(t, r, ancestors)
+ if err == SkipRouter {
+ continue
+ }
+ if err != nil {
+ return err
+ }
+ for _, sr := range t.matchers {
+ if h, ok := sr.(*Router); ok {
+ ancestors = append(ancestors, t)
+ err := h.walk(walkFn, ancestors)
+ if err != nil {
+ return err
+ }
+ ancestors = ancestors[:len(ancestors)-1]
+ }
+ }
+ if h, ok := t.handler.(*Router); ok {
+ ancestors = append(ancestors, t)
+ err := h.walk(walkFn, ancestors)
+ if err != nil {
+ return err
+ }
+ ancestors = ancestors[:len(ancestors)-1]
+ }
+ }
+ return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+ Route *Route
+ Handler http.Handler
+ Vars map[string]string
+
+ // MatchErr is set to appropriate matching error
+ // It is set to ErrMethodMismatch if there is a mismatch in
+ // the request method and route method
+ MatchErr error
+}
+
+type contextKey int
+
+const (
+ varsKey contextKey = iota
+ routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
+func Vars(r *http.Request) map[string]string {
+ if rv := r.Context().Value(varsKey); rv != nil {
+ return rv.(map[string]string)
+ }
+ return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context which is cleared
+// after the handler returns.
+func CurrentRoute(r *http.Request) *Route {
+ if rv := r.Context().Value(routeKey); rv != nil {
+ return rv.(*Route)
+ }
+ return nil
+}
+
+func requestWithVars(r *http.Request, vars map[string]string) *http.Request {
+ ctx := context.WithValue(r.Context(), varsKey, vars)
+ return r.WithContext(ctx)
+}
+
+func requestWithRoute(r *http.Request, route *Route) *http.Request {
+ ctx := context.WithValue(r.Context(), routeKey, route)
+ return r.WithContext(ctx)
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
+func cleanPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ np := path.Clean(p)
+ // path.Clean removes trailing slash except for root;
+ // put the trailing slash back if necessary.
+ if p[len(p)-1] == '/' && np != "/" {
+ np += "/"
+ }
+
+ return np
+}
+
+// uniqueVars returns an error if two slices contain duplicated strings.
+func uniqueVars(s1, s2 []string) error {
+ for _, v1 := range s1 {
+ for _, v2 := range s2 {
+ if v1 == v2 {
+ return fmt.Errorf("mux: duplicated route variable %q", v2)
+ }
+ }
+ }
+ return nil
+}
+
+// checkPairs returns the count of strings passed in, and an error if
+// the count is not an even number.
+func checkPairs(pairs ...string) (int, error) {
+ length := len(pairs)
+ if length%2 != 0 {
+ return length, fmt.Errorf(
+ "mux: number of parameters must be multiple of 2, got %v", pairs)
+ }
+ return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a
+// string to string map.
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+ length, err := checkPairs(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[string]string, length/2)
+ for i := 0; i < length; i += 2 {
+ m[pairs[i]] = pairs[i+1]
+ }
+ return m, nil
+}
+
+// mapFromPairsToRegex converts variadic string parameters to a
+// string to regex map.
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+ length, err := checkPairs(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[string]*regexp.Regexp, length/2)
+ for i := 0; i < length; i += 2 {
+ regex, err := regexp.Compile(pairs[i+1])
+ if err != nil {
+ return nil, err
+ }
+ m[pairs[i]] = regex
+ }
+ return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+ for _, v := range arr {
+ if v == value {
+ return true
+ }
+ }
+ return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+ for k, v := range toCheck {
+ // Check if key exists.
+ if canonicalKey {
+ k = http.CanonicalHeaderKey(k)
+ }
+ if values := toMatch[k]; values == nil {
+ return false
+ } else if v != "" {
+ // If value was defined as an empty string we only check that the
+ // key exists. Otherwise we also check for equality.
+ valueExists := false
+ for _, value := range values {
+ if v == value {
+ valueExists = true
+ break
+ }
+ }
+ if !valueExists {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against
+// the given regex
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+ for k, v := range toCheck {
+ // Check if key exists.
+ if canonicalKey {
+ k = http.CanonicalHeaderKey(k)
+ }
+ if values := toMatch[k]; values == nil {
+ return false
+ } else if v != nil {
+ // If value was defined as an empty string we only check that the
+ // key exists. Otherwise we also check for equality.
+ valueExists := false
+ for _, value := range values {
+ if v.MatchString(value) {
+ valueExists = true
+ break
+ }
+ }
+ if !valueExists {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// methodNotAllowed replies to the request with an HTTP status code 405.
+func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusMethodNotAllowed)
+}
+
+// methodNotAllowedHandler returns a simple request handler
+// that replies to each request with a status code 405.
+func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
new file mode 100644
index 0000000..0144842
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,388 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type routeRegexpOptions struct {
+ strictSlash bool
+ useEncodedPath bool
+}
+
+type regexpType int
+
+const (
+ regexpTypePath regexpType = 0
+ regexpTypeHost regexpType = 1
+ regexpTypePrefix regexpType = 2
+ regexpTypeQuery regexpType = 3
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
+func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
+ // Check if it is well-formed.
+ idxs, errBraces := braceIndices(tpl)
+ if errBraces != nil {
+ return nil, errBraces
+ }
+ // Backup the original.
+ template := tpl
+ // Now let's parse it.
+ defaultPattern := "[^/]+"
+ if typ == regexpTypeQuery {
+ defaultPattern = ".*"
+ } else if typ == regexpTypeHost {
+ defaultPattern = "[^.]+"
+ }
+ // Only match strict slash if not matching
+ if typ != regexpTypePath {
+ options.strictSlash = false
+ }
+ // Set a flag for strictSlash.
+ endSlash := false
+ if options.strictSlash && strings.HasSuffix(tpl, "/") {
+ tpl = tpl[:len(tpl)-1]
+ endSlash = true
+ }
+ varsN := make([]string, len(idxs)/2)
+ varsR := make([]*regexp.Regexp, len(idxs)/2)
+ pattern := bytes.NewBufferString("")
+ pattern.WriteByte('^')
+ reverse := bytes.NewBufferString("")
+ var end int
+ var err error
+ for i := 0; i < len(idxs); i += 2 {
+ // Set all values we are interested in.
+ raw := tpl[end:idxs[i]]
+ end = idxs[i+1]
+ parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+ name := parts[0]
+ patt := defaultPattern
+ if len(parts) == 2 {
+ patt = parts[1]
+ }
+ // Name or pattern can't be empty.
+ if name == "" || patt == "" {
+ return nil, fmt.Errorf("mux: missing name or pattern in %q",
+ tpl[idxs[i]:end])
+ }
+ // Build the regexp pattern.
+ fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
+
+ // Build the reverse template.
+ fmt.Fprintf(reverse, "%s%%s", raw)
+
+ // Append variable name and compiled pattern.
+ varsN[i/2] = name
+ varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Add the remaining.
+ raw := tpl[end:]
+ pattern.WriteString(regexp.QuoteMeta(raw))
+ if options.strictSlash {
+ pattern.WriteString("[/]?")
+ }
+ if typ == regexpTypeQuery {
+ // Add the default pattern if the query value is empty
+ if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+ pattern.WriteString(defaultPattern)
+ }
+ }
+ if typ != regexpTypePrefix {
+ pattern.WriteByte('$')
+ }
+
+ var wildcardHostPort bool
+ if typ == regexpTypeHost {
+ if !strings.Contains(pattern.String(), ":") {
+ wildcardHostPort = true
+ }
+ }
+ reverse.WriteString(raw)
+ if endSlash {
+ reverse.WriteByte('/')
+ }
+ // Compile full regexp.
+ reg, errCompile := regexp.Compile(pattern.String())
+ if errCompile != nil {
+ return nil, errCompile
+ }
+
+ // Check for capturing groups which used to work in older versions
+ if reg.NumSubexp() != len(idxs)/2 {
+ panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
+ "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
+ }
+
+ // Done!
+ return &routeRegexp{
+ template: template,
+ regexpType: typ,
+ options: options,
+ regexp: reg,
+ reverse: reverse.String(),
+ varsN: varsN,
+ varsR: varsR,
+ wildcardHostPort: wildcardHostPort,
+ }, nil
+}
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+ // The unmodified template.
+ template string
+ // The type of match
+ regexpType regexpType
+ // Options for matching
+ options routeRegexpOptions
+ // Expanded regexp.
+ regexp *regexp.Regexp
+ // Reverse template.
+ reverse string
+ // Variable names.
+ varsN []string
+ // Variable regexps (validators).
+ varsR []*regexp.Regexp
+ // Wildcard host-port (no strict port match in hostname)
+ wildcardHostPort bool
+}
+
+// Match matches the regexp against the URL host or path.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+ if r.regexpType == regexpTypeHost {
+ host := getHost(req)
+ if r.wildcardHostPort {
+ // Don't be strict on the port match
+ if i := strings.Index(host, ":"); i != -1 {
+ host = host[:i]
+ }
+ }
+ return r.regexp.MatchString(host)
+ }
+
+ if r.regexpType == regexpTypeQuery {
+ return r.matchQueryString(req)
+ }
+ path := req.URL.Path
+ if r.options.useEncodedPath {
+ path = req.URL.EscapedPath()
+ }
+ return r.regexp.MatchString(path)
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) {
+	urlValues := make([]interface{}, len(r.varsN))
+ for k, v := range r.varsN {
+ value, ok := values[v]
+ if !ok {
+ return "", fmt.Errorf("mux: missing route variable %q", v)
+ }
+ if r.regexpType == regexpTypeQuery {
+ value = url.QueryEscape(value)
+ }
+ urlValues[k] = value
+ }
+ rv := fmt.Sprintf(r.reverse, urlValues...)
+ if !r.regexp.MatchString(rv) {
+ // The URL is checked against the full regexp, instead of checking
+ // individual variables. This is faster but to provide a good error
+ // message, we check individual regexps if the URL doesn't match.
+ for k, v := range r.varsN {
+ if !r.varsR[k].MatchString(values[v]) {
+ return "", fmt.Errorf(
+ "mux: variable %q doesn't match, expected %q", values[v],
+ r.varsR[k].String())
+ }
+ }
+ }
+ return rv, nil
+}
+
+// getURLQuery returns a single query parameter from a request URL.
+// For a URL with foo=bar&baz=ding, we return only the relevant key
+// value pair for the routeRegexp.
+func (r *routeRegexp) getURLQuery(req *http.Request) string {
+ if r.regexpType != regexpTypeQuery {
+ return ""
+ }
+ templateKey := strings.SplitN(r.template, "=", 2)[0]
+ val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey)
+ if ok {
+ return templateKey + "=" + val
+ }
+ return ""
+}
+
+// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0].
+// If key was not found, empty string and false is returned.
+func findFirstQueryKey(rawQuery, key string) (value string, ok bool) {
+ query := []byte(rawQuery)
+ for len(query) > 0 {
+ foundKey := query
+ if i := bytes.IndexAny(foundKey, "&;"); i >= 0 {
+ foundKey, query = foundKey[:i], foundKey[i+1:]
+ } else {
+ query = query[:0]
+ }
+ if len(foundKey) == 0 {
+ continue
+ }
+ var value []byte
+ if i := bytes.IndexByte(foundKey, '='); i >= 0 {
+ foundKey, value = foundKey[:i], foundKey[i+1:]
+ }
+ if len(foundKey) < len(key) {
+ // Cannot possibly be key.
+ continue
+ }
+ keyString, err := url.QueryUnescape(string(foundKey))
+ if err != nil {
+ continue
+ }
+ if keyString != key {
+ continue
+ }
+ valueString, err := url.QueryUnescape(string(value))
+ if err != nil {
+ continue
+ }
+ return valueString, true
+ }
+ return "", false
+}
+
+func (r *routeRegexp) matchQueryString(req *http.Request) bool {
+ return r.regexp.MatchString(r.getURLQuery(req))
+}
+
+// braceIndices returns the first level curly brace indices from a string.
+// It returns an error in case of unbalanced braces.
+func braceIndices(s string) ([]int, error) {
+ var level, idx int
+ var idxs []int
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '{':
+ if level++; level == 1 {
+ idx = i
+ }
+ case '}':
+ if level--; level == 0 {
+ idxs = append(idxs, idx, i+1)
+ } else if level < 0 {
+ return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+ }
+ }
+ }
+ if level != 0 {
+ return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+ }
+ return idxs, nil
+}
+
+// varGroupName builds a capturing group name for the indexed variable.
+func varGroupName(idx int) string {
+ return "v" + strconv.Itoa(idx)
+}
+
+// ----------------------------------------------------------------------------
+// routeRegexpGroup
+// ----------------------------------------------------------------------------
+
+// routeRegexpGroup groups the route matchers that carry variables.
+type routeRegexpGroup struct {
+ host *routeRegexp
+ path *routeRegexp
+ queries []*routeRegexp
+}
+
+// setMatch extracts the variables from the URL once a route matches.
+func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+ // Store host variables.
+ if v.host != nil {
+ host := getHost(req)
+ if v.host.wildcardHostPort {
+ // Don't be strict on the port match
+ if i := strings.Index(host, ":"); i != -1 {
+ host = host[:i]
+ }
+ }
+ matches := v.host.regexp.FindStringSubmatchIndex(host)
+ if len(matches) > 0 {
+ extractVars(host, matches, v.host.varsN, m.Vars)
+ }
+ }
+ path := req.URL.Path
+ if r.useEncodedPath {
+ path = req.URL.EscapedPath()
+ }
+ // Store path variables.
+ if v.path != nil {
+ matches := v.path.regexp.FindStringSubmatchIndex(path)
+ if len(matches) > 0 {
+ extractVars(path, matches, v.path.varsN, m.Vars)
+ // Check if we should redirect.
+ if v.path.options.strictSlash {
+ p1 := strings.HasSuffix(path, "/")
+ p2 := strings.HasSuffix(v.path.template, "/")
+ if p1 != p2 {
+ u, _ := url.Parse(req.URL.String())
+ if p1 {
+ u.Path = u.Path[:len(u.Path)-1]
+ } else {
+ u.Path += "/"
+ }
+ m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently)
+ }
+ }
+ }
+ }
+ // Store query string variables.
+ for _, q := range v.queries {
+ queryURL := q.getURLQuery(req)
+ matches := q.regexp.FindStringSubmatchIndex(queryURL)
+ if len(matches) > 0 {
+ extractVars(queryURL, matches, q.varsN, m.Vars)
+ }
+ }
+}
+
+// getHost tries its best to return the request host.
+// According to section 14.23 of RFC 2616 the Host header
+// can include the port number if the default value of 80 is not used.
+func getHost(r *http.Request) string {
+ if r.URL.IsAbs() {
+ return r.URL.Host
+ }
+ return r.Host
+}
+
+func extractVars(input string, matches []int, names []string, output map[string]string) {
+ for i, name := range names {
+ output[name] = input[matches[2*i+2]:matches[2*i+3]]
+ }
+}
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
new file mode 100644
index 0000000..750afe5
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -0,0 +1,736 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+ // Request handler for the route.
+ handler http.Handler
+ // If true, this route never matches: it is only used to build URLs.
+ buildOnly bool
+ // The name used to build URLs.
+ name string
+ // Error resulted from building a route.
+ err error
+
+ // "global" reference to all named routes
+ namedRoutes map[string]*Route
+
+ // config possibly passed in from `Router`
+ routeConf
+}
+
+// SkipClean reports whether path cleaning is enabled for this route via
+// Router.SkipClean.
+func (r *Route) SkipClean() bool {
+ return r.skipClean
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+ if r.buildOnly || r.err != nil {
+ return false
+ }
+
+ var matchErr error
+
+ // Match everything.
+ for _, m := range r.matchers {
+ if matched := m.Match(req, match); !matched {
+ if _, ok := m.(methodMatcher); ok {
+ matchErr = ErrMethodMismatch
+ continue
+ }
+
+ // Ignore ErrNotFound errors. These errors arise from match call
+ // to Subrouters.
+ //
+ // This prevents subsequent matching subrouters from failing to
+ // run middleware. If not ignored, the middleware would see a
+ // non-nil MatchErr and be skipped, even when there was a
+ // matching route.
+ if match.MatchErr == ErrNotFound {
+ match.MatchErr = nil
+ }
+
+ matchErr = nil
+ return false
+ }
+ }
+
+ if matchErr != nil {
+ match.MatchErr = matchErr
+ return false
+ }
+
+ if match.MatchErr == ErrMethodMismatch && r.handler != nil {
+ // We found a route which matches request method, clear MatchErr
+ match.MatchErr = nil
+ // Then override the mis-matched handler
+ match.Handler = r.handler
+ }
+
+ // Yay, we have a match. Let's collect some info about it.
+ if match.Route == nil {
+ match.Route = r
+ }
+ if match.Handler == nil {
+ match.Handler = r.handler
+ }
+ if match.Vars == nil {
+ match.Vars = make(map[string]string)
+ }
+
+ // Set variables.
+ r.regexp.setMatch(req, match, r)
+ return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns the error that resulted from building the route, if any.
+func (r *Route) GetError() error {
+ return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+ r.buildOnly = true
+ return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+func (r *Route) Handler(handler http.Handler) *Route {
+ if r.err == nil {
+ r.handler = handler
+ }
+ return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+ return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+ return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// It is an error to call Name more than once on a route.
+func (r *Route) Name(name string) *Route {
+ if r.name != "" {
+ r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+ r.name, name)
+ }
+ if r.err == nil {
+ r.name = name
+ r.namedRoutes[name] = r
+ }
+ return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+ return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+ Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+ if r.err == nil {
+ r.matchers = append(r.matchers, m)
+ }
+ return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
+ if r.err != nil {
+ return r.err
+ }
+ if typ == regexpTypePath || typ == regexpTypePrefix {
+ if len(tpl) > 0 && tpl[0] != '/' {
+ return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+ }
+ if r.regexp.path != nil {
+ tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+ }
+ }
+ rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
+ strictSlash: r.strictSlash,
+ useEncodedPath: r.useEncodedPath,
+ })
+ if err != nil {
+ return err
+ }
+ for _, q := range r.regexp.queries {
+ if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+ return err
+ }
+ }
+ if typ == regexpTypeHost {
+ if r.regexp.path != nil {
+ if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+ return err
+ }
+ }
+ r.regexp.host = rr
+ } else {
+ if r.regexp.host != nil {
+ if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+ return err
+ }
+ }
+ if typ == regexpTypeQuery {
+ r.regexp.queries = append(r.regexp.queries, rr)
+ } else {
+ r.regexp.path = rr
+ }
+ }
+ r.addMatcher(rr)
+ return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+// r := mux.NewRouter()
+// r.Headers("Content-Type", "application/json",
+// "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+ if r.err == nil {
+ var headers map[string]string
+ headers, r.err = mapFromPairsToString(pairs...)
+ return r.addMatcher(headerMatcher(headers))
+ }
+ return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
+// support. For example:
+//
+// r := mux.NewRouter()
+// r.HeadersRegexp("Content-Type", "application/(text|json)",
+// "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match their regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+// Use the start and end of string anchors (^ and $) to match an exact value.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+ if r.err == nil {
+ var headers map[string]*regexp.Regexp
+ headers, r.err = mapFromPairsToRegex(pairs...)
+ return r.addMatcher(headerRegexMatcher(headers))
+ }
+ return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+// r := mux.NewRouter()
+// r.Host("www.example.com")
+// r.Host("{subdomain}.domain.com")
+// r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
+ return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+// Match returns the match for a given request.
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+ return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+ return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+ for k, v := range methods {
+ methods[k] = strings.ToUpper(v)
+ }
+ return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+// r := mux.NewRouter()
+// r.Path("/products/").Handler(ProductsHandler)
+// r.Path("/products/{key}").Handler(ProductsHandler)
+// r.Path("/articles/{category}/{id:[0-9]+}").
+// Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, regexpTypePath)
+ return r
+}
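A small sketch of a Path template paired with mux.Vars to read the captured variables; the handler name and route are illustrative.

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

// articleHandler reads the variables captured by the path template.
func articleHandler(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	fmt.Fprintf(w, "category=%s id=%s\n", vars["category"], vars["id"])
}

func main() {
	r := mux.NewRouter()
	r.Path("/articles/{category}/{id:[0-9]+}").HandlerFunc(articleHandler)
	log.Fatal(http.ListenAndServe(":8080", r))
}
```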
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
+ return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+// r := mux.NewRouter()
+// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined query
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+ length := len(pairs)
+ if length%2 != 0 {
+ r.err = fmt.Errorf(
+ "mux: number of parameters must be multiple of 2, got %v", pairs)
+ return nil
+ }
+ for i := 0; i < length; i += 2 {
+ if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
+ return r
+ }
+ }
+
+ return r
+}
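A brief sketch of Queries with one literal pair and one variable pair; variables defined in query values are retrieved through mux.Vars like path variables. The route and port are illustrative.

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// Matches e.g. /search?q=routers&page=2; any value of q is accepted as
	// long as the key is present, and page must be numeric.
	r.HandleFunc("/search", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintf(w, "page=%s\n", mux.Vars(req)["page"])
	}).Queries("q", "", "page", "{page:[0-9]+}")

	log.Fatal(http.ListenAndServe(":8080", r))
}
```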
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ scheme := r.URL.Scheme
+ // https://golang.org/pkg/net/http/#Request
+ // "For [most] server requests, fields other than Path and RawQuery will be
+ // empty."
+ // Since we're an http muxer, the scheme is either going to be http or https
+ // though, so we can just set it based on the tls termination state.
+ if scheme == "" {
+ if r.TLS == nil {
+ scheme = "http"
+ } else {
+ scheme = "https"
+ }
+ }
+ return matchInArray(m, scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+// If the request's URL has a scheme set, it will be matched against.
+// Generally, the URL scheme will only be set if a previous handler set it,
+// such as the ProxyHeaders handler from gorilla/handlers.
+// If unset, the scheme will be determined based on the request's TLS
+// termination state.
+// The first argument to Schemes will be used when constructing a route URL.
+func (r *Route) Schemes(schemes ...string) *Route {
+ for k, v := range schemes {
+ schemes[k] = strings.ToLower(v)
+ }
+ if len(schemes) > 0 {
+ r.buildScheme = schemes[0]
+ }
+ return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+ if r.buildVarsFunc != nil {
+ // compose the old and new functions
+ old := r.buildVarsFunc
+ r.buildVarsFunc = func(m map[string]string) map[string]string {
+ return f(old(m))
+ }
+ } else {
+ r.buildVarsFunc = f
+ }
+ return r
+}
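A sketch of URL building combined with a BuildVarsFunc that normalizes a variable before the URL is rendered; the route name and the lower-casing rule are illustrative.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// Lower-case the category whenever a URL is built for this route.
	route := r.Path("/articles/{category}/{id:[0-9]+}").Name("article").
		BuildVarsFunc(func(vars map[string]string) map[string]string {
			vars["category"] = strings.ToLower(vars["category"])
			return vars
		})

	u, err := route.URL("category", "Technology", "id", "42")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u.Path) // /articles/technology/42
}
```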
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+// r := mux.NewRouter()
+// s := r.Host("www.example.com").Subrouter()
+// s.HandleFunc("/products/", ProductsHandler)
+// s.HandleFunc("/products/{key}", ProductHandler)
+//	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+ // initialize a subrouter with a copy of the parent route's configuration
+ router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
+ r.addMatcher(router)
+ return router
+}
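A compact sketch of a PathPrefix subrouter; the prefix, handlers, and port are illustrative. Routes registered on the subrouter are only tested once the prefix matches.

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// Everything under /api is grouped on its own subrouter.
	api := r.PathPrefix("/api").Subrouter()
	api.HandleFunc("/products/", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("all products\n"))
	})
	api.HandleFunc("/products/{key}", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("one product\n"))
	})

	log.Fatal(http.ListenAndServe(":8080", r))
}
```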
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+// Name("article")
+//
+// ...a URL for it can be built using:
+//
+// url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return an url.URL with the following path:
+//
+// "/articles/technology/42"
+//
+// This also works for host variables:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+// Host("{subdomain}.domain.com").
+// Name("article")
+//
+// // url.String() will be "http://news.domain.com/articles/technology/42"
+// url, err := r.Get("article").URL("subdomain", "news",
+// "category", "technology",
+// "id", "42")
+//
+// The scheme of the resulting url will be the first argument that was passed to Schemes:
+//
+// // url.String() will be "https://example.com"
+// r := mux.NewRouter()
+//	url, err := r.Host("example.com").
+//	            Schemes("https", "http").URL()
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ values, err := r.prepareVars(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ var scheme, host, path string
+ queries := make([]string, 0, len(r.regexp.queries))
+ if r.regexp.host != nil {
+ if host, err = r.regexp.host.url(values); err != nil {
+ return nil, err
+ }
+ scheme = "http"
+ if r.buildScheme != "" {
+ scheme = r.buildScheme
+ }
+ }
+ if r.regexp.path != nil {
+ if path, err = r.regexp.path.url(values); err != nil {
+ return nil, err
+ }
+ }
+ for _, q := range r.regexp.queries {
+ var query string
+ if query, err = q.url(values); err != nil {
+ return nil, err
+ }
+ queries = append(queries, query)
+ }
+ return &url.URL{
+ Scheme: scheme,
+ Host: host,
+ Path: path,
+ RawQuery: strings.Join(queries, "&"),
+ }, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp.host == nil {
+ return nil, errors.New("mux: route doesn't have a host")
+ }
+ values, err := r.prepareVars(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ host, err := r.regexp.host.url(values)
+ if err != nil {
+ return nil, err
+ }
+ u := &url.URL{
+ Scheme: "http",
+ Host: host,
+ }
+ if r.buildScheme != "" {
+ u.Scheme = r.buildScheme
+ }
+ return u, nil
+}
+
+// URLPath builds the path part of the URL for a route. See Route.URL().
+//
+// The route must have a path defined.
+func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp.path == nil {
+ return nil, errors.New("mux: route doesn't have a path")
+ }
+ values, err := r.prepareVars(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ path, err := r.regexp.path.url(values)
+ if err != nil {
+ return nil, err
+ }
+ return &url.URL{
+ Path: path,
+ }, nil
+}
+
+// GetPathTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathTemplate() (string, error) {
+ if r.err != nil {
+ return "", r.err
+ }
+ if r.regexp.path == nil {
+ return "", errors.New("mux: route doesn't have a path")
+ }
+ return r.regexp.path.template, nil
+}
+
+// GetPathRegexp returns the expanded regular expression used to match route path.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathRegexp() (string, error) {
+ if r.err != nil {
+ return "", r.err
+ }
+ if r.regexp.path == nil {
+ return "", errors.New("mux: route does not have a path")
+ }
+ return r.regexp.path.regexp.String(), nil
+}
+
+// GetQueriesRegexp returns the expanded regular expressions used to match the
+// route queries.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not have queries.
+func (r *Route) GetQueriesRegexp() ([]string, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp.queries == nil {
+ return nil, errors.New("mux: route doesn't have queries")
+ }
+ queries := make([]string, 0, len(r.regexp.queries))
+ for _, query := range r.regexp.queries {
+ queries = append(queries, query.regexp.String())
+ }
+ return queries, nil
+}
+
+// GetQueriesTemplates returns the templates used to build the
+// query matching.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define queries.
+func (r *Route) GetQueriesTemplates() ([]string, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp.queries == nil {
+ return nil, errors.New("mux: route doesn't have queries")
+ }
+ queries := make([]string, 0, len(r.regexp.queries))
+ for _, query := range r.regexp.queries {
+ queries = append(queries, query.template)
+ }
+ return queries, nil
+}
+
+// GetMethods returns the methods the route matches against.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not have methods.
+func (r *Route) GetMethods() ([]string, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ for _, m := range r.matchers {
+ if methods, ok := m.(methodMatcher); ok {
+ return []string(methods), nil
+ }
+ }
+ return nil, errors.New("mux: route doesn't have methods")
+}
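A small sketch of using these introspection helpers to dump a registered route, for instance when generating documentation; the route itself is illustrative.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/articles/{id:[0-9]+}", func(w http.ResponseWriter, req *http.Request) {}).
		Methods("GET", "DELETE").
		Name("article")

	route := r.Get("article")
	tpl, _ := route.GetPathTemplate() // "/articles/{id:[0-9]+}"
	methods, _ := route.GetMethods()  // [GET DELETE]
	fmt.Println(tpl, methods)
}
```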
+
+// GetHostTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a host.
+func (r *Route) GetHostTemplate() (string, error) {
+ if r.err != nil {
+ return "", r.err
+ }
+ if r.regexp.host == nil {
+ return "", errors.New("mux: route doesn't have a host")
+ }
+ return r.regexp.host.template, nil
+}
+
+// prepareVars converts the route variable pairs into a map. If the route has a
+// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
+ m, err := mapFromPairsToString(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ return r.buildVars(m), nil
+}
+
+func (r *Route) buildVars(m map[string]string) map[string]string {
+ if r.buildVarsFunc != nil {
+ m = r.buildVarsFunc(m)
+ }
+ return m
+}
diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go
new file mode 100644
index 0000000..5f5c496
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/test_helpers.go
@@ -0,0 +1,19 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import "net/http"
+
+// SetURLVars sets the URL variables for the given request, to be accessed via
+// mux.Vars for testing route behaviour. Arguments are not modified; a shallow
+// copy is returned.
+//
+// This API should only be used for testing purposes; it provides a way to
+// inject variables into the request context. Alternatively, URL variables
+// can be set by making a route that captures the required variables,
+// starting a server and sending the request to that server.
+func SetURLVars(r *http.Request, val map[string]string) *http.Request {
+ return requestWithVars(r, val)
+}
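A sketch of the intended use in a unit test (a `_test.go` file): inject the variables a handler expects without routing a real request. The handler and variable names are illustrative.

```go
package main

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/gorilla/mux"
)

// articleByID reads a route variable that the router would normally set.
func articleByID(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte(mux.Vars(r)["id"]))
}

func TestArticleByID(t *testing.T) {
	req := httptest.NewRequest(http.MethodGet, "/articles/42", nil)
	req = mux.SetURLVars(req, map[string]string{"id": "42"})

	rec := httptest.NewRecorder()
	articleByID(rec, req)

	if got := rec.Body.String(); got != "42" {
		t.Fatalf("got %q, want %q", got, "42")
	}
}
```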
diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore
new file mode 100644
index 0000000..cd3fcd1
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.idea/
+*.iml
diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS
new file mode 100644
index 0000000..1931f40
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/AUTHORS
@@ -0,0 +1,9 @@
+# This is the official list of Gorilla WebSocket authors for copyright
+# purposes.
+#
+# Please keep the list sorted.
+
+Gary Burd
+Google LLC (https://opensource.google.com/)
+Joachim Bauch
+
diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE
new file mode 100644
index 0000000..9171c97
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md
new file mode 100644
index 0000000..2517a28
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/README.md
@@ -0,0 +1,39 @@
+# Gorilla WebSocket
+
+[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
+[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket)
+
+Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
+[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
+
+
+---
+
+⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)**
+
+---
+
+### Documentation
+
+* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
+* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
+* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
+* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
+* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
+
+### Status
+
+The Gorilla WebSocket package provides a complete and tested implementation of
+the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
+package API is stable.
+
+### Installation
+
+ go get github.com/gorilla/websocket
+
+### Protocol Compliance
+
+The Gorilla WebSocket package passes the server tests in the [Autobahn Test
+Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
+subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
+
diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go
new file mode 100644
index 0000000..2efd835
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client.go
@@ -0,0 +1,422 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// ErrBadHandshake is returned when the server response to the opening
+// handshake is invalid.
+var ErrBadHandshake = errors.New("websocket: bad handshake")
+
+var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
+
+// NewClient creates a new client connection using the given net connection.
+// The URL u specifies the host and request URI. Use requestHeader to specify
+// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
+// (Cookie). Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etc.
+//
+// Deprecated: Use Dialer instead.
+func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
+ d := Dialer{
+ ReadBufferSize: readBufSize,
+ WriteBufferSize: writeBufSize,
+ NetDial: func(net, addr string) (net.Conn, error) {
+ return netConn, nil
+ },
+ }
+ return d.Dial(u.String(), requestHeader)
+}
+
+// A Dialer contains options for connecting to a WebSocket server.
+//
+// It is safe to call Dialer's methods concurrently.
+type Dialer struct {
+ // NetDial specifies the dial function for creating TCP connections. If
+ // NetDial is nil, net.Dial is used.
+ NetDial func(network, addr string) (net.Conn, error)
+
+ // NetDialContext specifies the dial function for creating TCP connections. If
+ // NetDialContext is nil, NetDial is used.
+ NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If
+ // NetDialTLSContext is nil, NetDialContext is used.
+ // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and
+ // TLSClientConfig is ignored.
+ NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Proxy specifies a function to return a proxy for a given
+ // Request. If the function returns a non-nil error, the
+ // request is aborted with the provided error.
+ // If Proxy is nil or returns a nil *URL, no proxy is used.
+ Proxy func(*http.Request) (*url.URL, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with tls.Client.
+ // If nil, the default configuration is used.
+	// If NetDialTLSContext is set, Dial assumes the TLS handshake is done
+	// there and TLSClientConfig is ignored.
+ TLSClientConfig *tls.Config
+
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
+ // size is zero, then a useful default size is used. The I/O buffer sizes
+ // do not limit the size of the messages that can be sent or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
+ // Subprotocols specifies the client's requested subprotocols.
+ Subprotocols []string
+
+ // EnableCompression specifies if the client should attempt to negotiate
+ // per message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+
+ // Jar specifies the cookie jar.
+ // If Jar is nil, cookies are not sent in requests and ignored
+ // in responses.
+ Jar http.CookieJar
+}
+
+// Dial creates a new client connection by calling DialContext with a background context.
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+ return d.DialContext(context.Background(), urlStr, requestHeader)
+}
+
+var errMalformedURL = errors.New("malformed ws or wss URL")
+
+func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
+ hostPort = u.Host
+ hostNoPort = u.Host
+ if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
+ hostNoPort = hostNoPort[:i]
+ } else {
+ switch u.Scheme {
+ case "wss":
+ hostPort += ":443"
+ case "https":
+ hostPort += ":443"
+ default:
+ hostPort += ":80"
+ }
+ }
+ return hostPort, hostNoPort
+}
+
+// DefaultDialer is a dialer with all fields set to the default values.
+var DefaultDialer = &Dialer{
+ Proxy: http.ProxyFromEnvironment,
+ HandshakeTimeout: 45 * time.Second,
+}
+
+// nilDialer is the dialer to use when the receiver is nil.
+var nilDialer = *DefaultDialer
+
+// DialContext creates a new client connection. Use requestHeader to specify the
+// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
+// Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// The context will be used in the request and in the Dialer.
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etcetera. The response body may not contain the entire response and does not
+// need to be closed by the application.
+func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+ if d == nil {
+ d = &nilDialer
+ }
+
+ challengeKey, err := generateChallengeKey()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ u, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ switch u.Scheme {
+ case "ws":
+ u.Scheme = "http"
+ case "wss":
+ u.Scheme = "https"
+ default:
+ return nil, nil, errMalformedURL
+ }
+
+ if u.User != nil {
+ // User name and password are not allowed in websocket URIs.
+ return nil, nil, errMalformedURL
+ }
+
+ req := &http.Request{
+ Method: http.MethodGet,
+ URL: u,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(http.Header),
+ Host: u.Host,
+ }
+ req = req.WithContext(ctx)
+
+ // Set the cookies present in the cookie jar of the dialer
+ if d.Jar != nil {
+ for _, cookie := range d.Jar.Cookies(u) {
+ req.AddCookie(cookie)
+ }
+ }
+
+ // Set the request headers using the capitalization for names and values in
+ // RFC examples. Although the capitalization shouldn't matter, there are
+ // servers that depend on it. The Header.Set method is not used because the
+ // method canonicalizes the header names.
+ req.Header["Upgrade"] = []string{"websocket"}
+ req.Header["Connection"] = []string{"Upgrade"}
+ req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
+ req.Header["Sec-WebSocket-Version"] = []string{"13"}
+ if len(d.Subprotocols) > 0 {
+ req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
+ }
+ for k, vs := range requestHeader {
+ switch {
+ case k == "Host":
+ if len(vs) > 0 {
+ req.Host = vs[0]
+ }
+ case k == "Upgrade" ||
+ k == "Connection" ||
+ k == "Sec-Websocket-Key" ||
+ k == "Sec-Websocket-Version" ||
+ k == "Sec-Websocket-Extensions" ||
+ (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
+ return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
+ case k == "Sec-Websocket-Protocol":
+ req.Header["Sec-WebSocket-Protocol"] = vs
+ default:
+ req.Header[k] = vs
+ }
+ }
+
+ if d.EnableCompression {
+ req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
+ }
+
+ if d.HandshakeTimeout != 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
+ defer cancel()
+ }
+
+ // Get network dial function.
+	var netDial func(network, addr string) (net.Conn, error)
+
+ switch u.Scheme {
+ case "http":
+ if d.NetDialContext != nil {
+ netDial = func(network, addr string) (net.Conn, error) {
+ return d.NetDialContext(ctx, network, addr)
+ }
+ } else if d.NetDial != nil {
+ netDial = d.NetDial
+ }
+ case "https":
+ if d.NetDialTLSContext != nil {
+ netDial = func(network, addr string) (net.Conn, error) {
+ return d.NetDialTLSContext(ctx, network, addr)
+ }
+ } else if d.NetDialContext != nil {
+ netDial = func(network, addr string) (net.Conn, error) {
+ return d.NetDialContext(ctx, network, addr)
+ }
+ } else if d.NetDial != nil {
+ netDial = d.NetDial
+ }
+ default:
+ return nil, nil, errMalformedURL
+ }
+
+ if netDial == nil {
+ netDialer := &net.Dialer{}
+ netDial = func(network, addr string) (net.Conn, error) {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ }
+
+ // If needed, wrap the dial function to set the connection deadline.
+ if deadline, ok := ctx.Deadline(); ok {
+ forwardDial := netDial
+ netDial = func(network, addr string) (net.Conn, error) {
+ c, err := forwardDial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ err = c.SetDeadline(deadline)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ return c, nil
+ }
+ }
+
+ // If needed, wrap the dial function to connect through a proxy.
+ if d.Proxy != nil {
+ proxyURL, err := d.Proxy(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ if proxyURL != nil {
+ dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
+ if err != nil {
+ return nil, nil, err
+ }
+ netDial = dialer.Dial
+ }
+ }
+
+ hostPort, hostNoPort := hostPortNoPort(u)
+ trace := httptrace.ContextClientTrace(ctx)
+ if trace != nil && trace.GetConn != nil {
+ trace.GetConn(hostPort)
+ }
+
+ netConn, err := netDial("tcp", hostPort)
+ if trace != nil && trace.GotConn != nil {
+ trace.GotConn(httptrace.GotConnInfo{
+ Conn: netConn,
+ })
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+
+ defer func() {
+ if netConn != nil {
+ netConn.Close()
+ }
+ }()
+
+ if u.Scheme == "https" && d.NetDialTLSContext == nil {
+ // If NetDialTLSContext is set, assume that the TLS handshake has already been done
+
+ cfg := cloneTLSConfig(d.TLSClientConfig)
+ if cfg.ServerName == "" {
+ cfg.ServerName = hostNoPort
+ }
+ tlsConn := tls.Client(netConn, cfg)
+ netConn = tlsConn
+
+ if trace != nil && trace.TLSHandshakeStart != nil {
+ trace.TLSHandshakeStart()
+ }
+ err := doHandshake(ctx, tlsConn, cfg)
+ if trace != nil && trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
+ }
+
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
+
+ if err := req.Write(netConn); err != nil {
+ return nil, nil, err
+ }
+
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
+ trace.GotFirstResponseByte()
+ }
+ }
+
+ resp, err := http.ReadResponse(conn.br, req)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if d.Jar != nil {
+ if rc := resp.Cookies(); len(rc) > 0 {
+ d.Jar.SetCookies(u, rc)
+ }
+ }
+
+ if resp.StatusCode != 101 ||
+ !tokenListContainsValue(resp.Header, "Upgrade", "websocket") ||
+ !tokenListContainsValue(resp.Header, "Connection", "upgrade") ||
+ resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
+ // Before closing the network connection on return from this
+ // function, slurp up some of the response to aid application
+ // debugging.
+ buf := make([]byte, 1024)
+ n, _ := io.ReadFull(resp.Body, buf)
+ resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
+ return nil, resp, ErrBadHandshake
+ }
+
+ for _, ext := range parseExtensions(resp.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ _, snct := ext["server_no_context_takeover"]
+ _, cnct := ext["client_no_context_takeover"]
+ if !snct || !cnct {
+ return nil, resp, errInvalidCompression
+ }
+ conn.newCompressionWriter = compressNoContextTakeover
+ conn.newDecompressionReader = decompressNoContextTakeover
+ break
+ }
+
+ resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
+ conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
+
+ netConn.SetDeadline(time.Time{})
+ netConn = nil // to avoid close in defer.
+ return conn, resp, nil
+}
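A hedged client-side sketch: configure a Dialer, dial with a context, and inspect the handshake response on failure. The endpoint, origin, subprotocol, and timeouts are placeholders.

```go
package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"github.com/gorilla/websocket"
)

func main() {
	d := websocket.Dialer{
		Proxy:            http.ProxyFromEnvironment,
		HandshakeTimeout: 10 * time.Second,
		Subprotocols:     []string{"chat"},
	}

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	header := http.Header{}
	header.Set("Origin", "https://example.com")

	conn, resp, err := d.DialContext(ctx, "wss://example.com/ws", header)
	if err != nil {
		if err == websocket.ErrBadHandshake && resp != nil {
			// The *http.Response is returned so redirects, auth challenges,
			// and similar failures can be inspected.
			log.Fatalf("handshake failed: %s", resp.Status)
		}
		log.Fatal(err)
	}
	defer conn.Close()

	log.Println("negotiated subprotocol:", conn.Subprotocol())
}
```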
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return cfg.Clone()
+}
diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go
new file mode 100644
index 0000000..813ffb1
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/compression.go
@@ -0,0 +1,148 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "compress/flate"
+ "errors"
+ "io"
+ "strings"
+ "sync"
+)
+
+const (
+ minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
+ maxCompressionLevel = flate.BestCompression
+ defaultCompressionLevel = 1
+)
+
+var (
+ flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
+ flateReaderPool = sync.Pool{New: func() interface{} {
+ return flate.NewReader(nil)
+ }}
+)
+
+func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
+ const tail =
+ // Add four bytes as specified in RFC
+ "\x00\x00\xff\xff" +
+ // Add final block to squelch unexpected EOF error from flate reader.
+ "\x01\x00\x00\xff\xff"
+
+ fr, _ := flateReaderPool.Get().(io.ReadCloser)
+ fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
+ return &flateReadWrapper{fr}
+}
+
+func isValidCompressionLevel(level int) bool {
+ return minCompressionLevel <= level && level <= maxCompressionLevel
+}
+
+func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
+ p := &flateWriterPools[level-minCompressionLevel]
+ tw := &truncWriter{w: w}
+ fw, _ := p.Get().(*flate.Writer)
+ if fw == nil {
+ fw, _ = flate.NewWriter(tw, level)
+ } else {
+ fw.Reset(tw)
+ }
+ return &flateWriteWrapper{fw: fw, tw: tw, p: p}
+}
+
+// truncWriter is an io.Writer that writes all but the last four bytes of the
+// stream to another io.Writer.
+type truncWriter struct {
+ w io.WriteCloser
+ n int
+ p [4]byte
+}
+
+func (w *truncWriter) Write(p []byte) (int, error) {
+ n := 0
+
+ // fill buffer first for simplicity.
+ if w.n < len(w.p) {
+ n = copy(w.p[w.n:], p)
+ p = p[n:]
+ w.n += n
+ if len(p) == 0 {
+ return n, nil
+ }
+ }
+
+ m := len(p)
+ if m > len(w.p) {
+ m = len(w.p)
+ }
+
+ if nn, err := w.w.Write(w.p[:m]); err != nil {
+ return n + nn, err
+ }
+
+ copy(w.p[:], w.p[m:])
+ copy(w.p[len(w.p)-m:], p[len(p)-m:])
+ nn, err := w.w.Write(p[:len(p)-m])
+ return n + nn, err
+}
+
+type flateWriteWrapper struct {
+ fw *flate.Writer
+ tw *truncWriter
+ p *sync.Pool
+}
+
+func (w *flateWriteWrapper) Write(p []byte) (int, error) {
+ if w.fw == nil {
+ return 0, errWriteClosed
+ }
+ return w.fw.Write(p)
+}
+
+func (w *flateWriteWrapper) Close() error {
+ if w.fw == nil {
+ return errWriteClosed
+ }
+ err1 := w.fw.Flush()
+ w.p.Put(w.fw)
+ w.fw = nil
+ if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
+ return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
+ }
+ err2 := w.tw.w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+type flateReadWrapper struct {
+ fr io.ReadCloser
+}
+
+func (r *flateReadWrapper) Read(p []byte) (int, error) {
+ if r.fr == nil {
+ return 0, io.ErrClosedPipe
+ }
+ n, err := r.fr.Read(p)
+ if err == io.EOF {
+ // Preemptively place the reader back in the pool. This helps with
+ // scenarios where the application does not call NextReader() soon after
+ // this final read.
+ r.Close()
+ }
+ return n, err
+}
+
+func (r *flateReadWrapper) Close() error {
+ if r.fr == nil {
+ return io.ErrClosedPipe
+ }
+ err := r.fr.Close()
+ flateReaderPool.Put(r.fr)
+ r.fr = nil
+ return err
+}
diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go
new file mode 100644
index 0000000..331eebc
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn.go
@@ -0,0 +1,1230 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/binary"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+)
+
+const (
+ // Frame header byte 0 bits from Section 5.2 of RFC 6455
+ finalBit = 1 << 7
+ rsv1Bit = 1 << 6
+ rsv2Bit = 1 << 5
+ rsv3Bit = 1 << 4
+
+ // Frame header byte 1 bits from Section 5.2 of RFC 6455
+ maskBit = 1 << 7
+
+ maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask
+ maxControlFramePayloadSize = 125
+
+ writeWait = time.Second
+
+ defaultReadBufferSize = 4096
+ defaultWriteBufferSize = 4096
+
+ continuationFrame = 0
+ noFrame = -1
+)
+
+// Close codes defined in RFC 6455, section 11.7.
+const (
+ CloseNormalClosure = 1000
+ CloseGoingAway = 1001
+ CloseProtocolError = 1002
+ CloseUnsupportedData = 1003
+ CloseNoStatusReceived = 1005
+ CloseAbnormalClosure = 1006
+ CloseInvalidFramePayloadData = 1007
+ ClosePolicyViolation = 1008
+ CloseMessageTooBig = 1009
+ CloseMandatoryExtension = 1010
+ CloseInternalServerErr = 1011
+ CloseServiceRestart = 1012
+ CloseTryAgainLater = 1013
+ CloseTLSHandshake = 1015
+)
+
+// The message types are defined in RFC 6455, section 11.8.
+const (
+ // TextMessage denotes a text data message. The text message payload is
+ // interpreted as UTF-8 encoded text data.
+ TextMessage = 1
+
+ // BinaryMessage denotes a binary data message.
+ BinaryMessage = 2
+
+ // CloseMessage denotes a close control message. The optional message
+ // payload contains a numeric code and text. Use the FormatCloseMessage
+ // function to format a close message payload.
+ CloseMessage = 8
+
+ // PingMessage denotes a ping control message. The optional message payload
+ // is UTF-8 encoded text.
+ PingMessage = 9
+
+ // PongMessage denotes a pong control message. The optional message payload
+ // is UTF-8 encoded text.
+ PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net Error interface.
+type netError struct {
+ msg string
+ temporary bool
+ timeout bool
+}
+
+func (e *netError) Error() string { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool { return e.timeout }
+
+// CloseError represents a close message.
+type CloseError struct {
+ // Code is defined in RFC 6455, section 11.7.
+ Code int
+
+ // Text is the optional text payload.
+ Text string
+}
+
+func (e *CloseError) Error() string {
+ s := []byte("websocket: close ")
+ s = strconv.AppendInt(s, int64(e.Code), 10)
+ switch e.Code {
+ case CloseNormalClosure:
+ s = append(s, " (normal)"...)
+ case CloseGoingAway:
+ s = append(s, " (going away)"...)
+ case CloseProtocolError:
+ s = append(s, " (protocol error)"...)
+ case CloseUnsupportedData:
+ s = append(s, " (unsupported data)"...)
+ case CloseNoStatusReceived:
+ s = append(s, " (no status)"...)
+ case CloseAbnormalClosure:
+ s = append(s, " (abnormal closure)"...)
+ case CloseInvalidFramePayloadData:
+ s = append(s, " (invalid payload data)"...)
+ case ClosePolicyViolation:
+ s = append(s, " (policy violation)"...)
+ case CloseMessageTooBig:
+ s = append(s, " (message too big)"...)
+ case CloseMandatoryExtension:
+ s = append(s, " (mandatory extension missing)"...)
+ case CloseInternalServerErr:
+ s = append(s, " (internal server error)"...)
+ case CloseTLSHandshake:
+ s = append(s, " (TLS handshake error)"...)
+ }
+ if e.Text != "" {
+ s = append(s, ": "...)
+ s = append(s, e.Text...)
+ }
+ return string(s)
+}
+
+// IsCloseError returns boolean indicating whether the error is a *CloseError
+// with one of the specified codes.
+func IsCloseError(err error, codes ...int) bool {
+ if e, ok := err.(*CloseError); ok {
+ for _, code := range codes {
+ if e.Code == code {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// IsUnexpectedCloseError returns boolean indicating whether the error is a
+// *CloseError with a code not in the list of expected codes.
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
+ if e, ok := err.(*CloseError); ok {
+ for _, code := range expectedCodes {
+ if e.Code == code {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
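A sketch of a read loop that treats normal closure as routine and logs anything else. It assumes conn was obtained elsewhere and uses ReadMessage, the package's convenience wrapper around NextReader defined later in this file; the package name is illustrative.

```go
package wsclient

import (
	"log"

	"github.com/gorilla/websocket"
)

// readLoop drains messages until the connection fails, reporting only
// unexpected close codes.
func readLoop(conn *websocket.Conn) {
	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
				log.Printf("unexpected close: %v", err)
			}
			return // read errors are permanent; break out of the loop
		}
		log.Printf("received: %s", msg)
	}
}
```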
+
+var (
+ errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
+ errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
+ errBadWriteOpCode = errors.New("websocket: bad write message type")
+ errWriteClosed = errors.New("websocket: write closed")
+ errInvalidControlFrame = errors.New("websocket: invalid control frame")
+)
+
+func newMaskKey() [4]byte {
+ n := rand.Uint32()
+ return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+}
+
+func hideTempErr(err error) error {
+ if e, ok := err.(net.Error); ok && e.Temporary() {
+ err = &netError{msg: e.Error(), timeout: e.Timeout()}
+ }
+ return err
+}
+
+func isControl(frameType int) bool {
+ return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
+}
+
+func isData(frameType int) bool {
+ return frameType == TextMessage || frameType == BinaryMessage
+}
+
+var validReceivedCloseCodes = map[int]bool{
+ // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+
+ CloseNormalClosure: true,
+ CloseGoingAway: true,
+ CloseProtocolError: true,
+ CloseUnsupportedData: true,
+ CloseNoStatusReceived: false,
+ CloseAbnormalClosure: false,
+ CloseInvalidFramePayloadData: true,
+ ClosePolicyViolation: true,
+ CloseMessageTooBig: true,
+ CloseMandatoryExtension: true,
+ CloseInternalServerErr: true,
+ CloseServiceRestart: true,
+ CloseTryAgainLater: true,
+ CloseTLSHandshake: false,
+}
+
+func isValidReceivedCloseCode(code int) bool {
+ return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
+}
+
+// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
+// interface. The type of the value stored in a pool is not specified.
+type BufferPool interface {
+ // Get gets a value from the pool or returns nil if the pool is empty.
+ Get() interface{}
+ // Put adds a value to the pool.
+ Put(interface{})
+}
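A minimal sketch of sharing one write buffer pool across every connection a dialer creates, as the WriteBufferPool documentation above suggests (one pool per WriteBufferSize value); the package name and buffer size are illustrative.

```go
package wsclient

import (
	"sync"

	"github.com/gorilla/websocket"
)

// writePool is reused by every connection this dialer creates.
// *sync.Pool satisfies the BufferPool interface.
var writePool = &sync.Pool{}

var dialer = &websocket.Dialer{
	WriteBufferSize: 4096,
	WriteBufferPool: writePool,
}
```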
+
+// writePoolData is the type added to the write buffer pool. This wrapper is
+// used to prevent applications from peeking at and depending on the values
+// added to the pool.
+type writePoolData struct{ buf []byte }
+
+// The Conn type represents a WebSocket connection.
+type Conn struct {
+ conn net.Conn
+ isServer bool
+ subprotocol string
+
+ // Write fields
+ mu chan struct{} // used as mutex to protect write to conn
+ writeBuf []byte // frame is constructed in this buffer.
+ writePool BufferPool
+ writeBufSize int
+ writeDeadline time.Time
+ writer io.WriteCloser // the current writer returned to the application
+ isWriting bool // for best-effort concurrent write detection
+
+ writeErrMu sync.Mutex
+ writeErr error
+
+ enableWriteCompression bool
+ compressionLevel int
+ newCompressionWriter func(io.WriteCloser, int) io.WriteCloser
+
+ // Read fields
+ reader io.ReadCloser // the current reader returned to the application
+ readErr error
+ br *bufio.Reader
+ // bytes remaining in current frame.
+	// use setReadRemaining to safely update this value and prevent overflow
+ readRemaining int64
+	readFinal     bool  // true when the final frame of the current message has been read.
+ readLength int64 // Message size.
+ readLimit int64 // Maximum message size.
+ readMaskPos int
+ readMaskKey [4]byte
+ handlePong func(string) error
+ handlePing func(string) error
+ handleClose func(int, string) error
+ readErrCount int
+ messageReader *messageReader // the current low-level reader
+
+ readDecompress bool // whether last read frame had RSV1 set
+ newDecompressionReader func(io.Reader) io.ReadCloser
+}
+
+func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn {
+
+ if br == nil {
+ if readBufferSize == 0 {
+ readBufferSize = defaultReadBufferSize
+ } else if readBufferSize < maxControlFramePayloadSize {
+ // must be large enough for control frame
+ readBufferSize = maxControlFramePayloadSize
+ }
+ br = bufio.NewReaderSize(conn, readBufferSize)
+ }
+
+ if writeBufferSize <= 0 {
+ writeBufferSize = defaultWriteBufferSize
+ }
+ writeBufferSize += maxFrameHeaderSize
+
+ if writeBuf == nil && writeBufferPool == nil {
+ writeBuf = make([]byte, writeBufferSize)
+ }
+
+ mu := make(chan struct{}, 1)
+ mu <- struct{}{}
+ c := &Conn{
+ isServer: isServer,
+ br: br,
+ conn: conn,
+ mu: mu,
+ readFinal: true,
+ writeBuf: writeBuf,
+ writePool: writeBufferPool,
+ writeBufSize: writeBufferSize,
+ enableWriteCompression: true,
+ compressionLevel: defaultCompressionLevel,
+ }
+ c.SetCloseHandler(nil)
+ c.SetPingHandler(nil)
+ c.SetPongHandler(nil)
+ return c
+}
+
+// setReadRemaining tracks the number of bytes remaining on the connection. If n
+// overflows, an ErrReadLimit is returned.
+func (c *Conn) setReadRemaining(n int64) error {
+ if n < 0 {
+ return ErrReadLimit
+ }
+
+ c.readRemaining = n
+ return nil
+}
+
+// Subprotocol returns the negotiated protocol for the connection.
+func (c *Conn) Subprotocol() string {
+ return c.subprotocol
+}
+
+// Close closes the underlying network connection without sending or waiting
+// for a close message.
+func (c *Conn) Close() error {
+ return c.conn.Close()
+}
+
+// LocalAddr returns the local network address.
+func (c *Conn) LocalAddr() net.Addr {
+ return c.conn.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (c *Conn) RemoteAddr() net.Addr {
+ return c.conn.RemoteAddr()
+}
+
+// Write methods
+
+func (c *Conn) writeFatal(err error) error {
+ err = hideTempErr(err)
+ c.writeErrMu.Lock()
+ if c.writeErr == nil {
+ c.writeErr = err
+ }
+ c.writeErrMu.Unlock()
+ return err
+}
+
+func (c *Conn) read(n int) ([]byte, error) {
+ p, err := c.br.Peek(n)
+ if err == io.EOF {
+ err = errUnexpectedEOF
+ }
+ c.br.Discard(len(p))
+ return p, err
+}
+
+func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
+ <-c.mu
+ defer func() { c.mu <- struct{}{} }()
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ c.conn.SetWriteDeadline(deadline)
+ if len(buf1) == 0 {
+ _, err = c.conn.Write(buf0)
+ } else {
+ err = c.writeBufs(buf0, buf1)
+ }
+ if err != nil {
+ return c.writeFatal(err)
+ }
+ if frameType == CloseMessage {
+ c.writeFatal(ErrCloseSent)
+ }
+ return nil
+}
+
+func (c *Conn) writeBufs(bufs ...[]byte) error {
+ b := net.Buffers(bufs)
+ _, err := b.WriteTo(c.conn)
+ return err
+}
+
+// WriteControl writes a control message with the given deadline. The allowed
+// message types are CloseMessage, PingMessage and PongMessage.
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
+ if !isControl(messageType) {
+ return errBadWriteOpCode
+ }
+ if len(data) > maxControlFramePayloadSize {
+ return errInvalidControlFrame
+ }
+
+ b0 := byte(messageType) | finalBit
+ b1 := byte(len(data))
+ if !c.isServer {
+ b1 |= maskBit
+ }
+
+ buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
+ buf = append(buf, b0, b1)
+
+ if c.isServer {
+ buf = append(buf, data...)
+ } else {
+ key := newMaskKey()
+ buf = append(buf, key[:]...)
+ buf = append(buf, data...)
+ maskBytes(key, 0, buf[6:])
+ }
+
+ d := 1000 * time.Hour
+ if !deadline.IsZero() {
+ d = deadline.Sub(time.Now())
+ if d < 0 {
+ return errWriteTimeout
+ }
+ }
+
+ timer := time.NewTimer(d)
+ select {
+ case <-c.mu:
+ timer.Stop()
+ case <-timer.C:
+ return errWriteTimeout
+ }
+ defer func() { c.mu <- struct{}{} }()
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ c.conn.SetWriteDeadline(deadline)
+ _, err = c.conn.Write(buf)
+ if err != nil {
+ return c.writeFatal(err)
+ }
+ if messageType == CloseMessage {
+ c.writeFatal(ErrCloseSent)
+ }
+ return err
+}
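A sketch of the common keep-alive pattern built on WriteControl: send pings on a ticker and extend the read deadline from the pong handler. The intervals are arbitrary; SetReadDeadline and SetPongHandler are part of this package but defined elsewhere in the file, and the package name is illustrative.

```go
package wsclient

import (
	"time"

	"github.com/gorilla/websocket"
)

const (
	pingPeriod = 30 * time.Second
	pongWait   = 40 * time.Second
)

// keepAlive pings the peer periodically and treats a missing pong as a dead
// connection by letting the read deadline expire.
func keepAlive(conn *websocket.Conn, done <-chan struct{}) {
	conn.SetReadDeadline(time.Now().Add(pongWait))
	conn.SetPongHandler(func(string) error {
		return conn.SetReadDeadline(time.Now().Add(pongWait))
	})

	ticker := time.NewTicker(pingPeriod)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			deadline := time.Now().Add(time.Second)
			if err := conn.WriteControl(websocket.PingMessage, nil, deadline); err != nil {
				return
			}
		case <-done:
			return
		}
	}
}
```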
+
+// beginMessage prepares a connection and message writer for a new message.
+func (c *Conn) beginMessage(mw *messageWriter, messageType int) error {
+ // Close previous writer if not already closed by the application. It's
+ // probably better to return an error in this situation, but we cannot
+ // change this without breaking existing applications.
+ if c.writer != nil {
+ c.writer.Close()
+ c.writer = nil
+ }
+
+ if !isControl(messageType) && !isData(messageType) {
+ return errBadWriteOpCode
+ }
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ mw.c = c
+ mw.frameType = messageType
+ mw.pos = maxFrameHeaderSize
+
+ if c.writeBuf == nil {
+ wpd, ok := c.writePool.Get().(writePoolData)
+ if ok {
+ c.writeBuf = wpd.buf
+ } else {
+ c.writeBuf = make([]byte, c.writeBufSize)
+ }
+ }
+ return nil
+}
+
+// NextWriter returns a writer for the next message to send. The writer's Close
+// method flushes the complete message to the network.
+//
+// There can be at most one open writer on a connection. NextWriter closes the
+// previous writer if the application has not already done so.
+//
+// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
+// PongMessage) are supported.
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
+ var mw messageWriter
+ if err := c.beginMessage(&mw, messageType); err != nil {
+ return nil, err
+ }
+ c.writer = &mw
+ if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) {
+ w := c.newCompressionWriter(c.writer, c.compressionLevel)
+ mw.compress = true
+ c.writer = w
+ }
+ return c.writer, nil
+}
+
+type messageWriter struct {
+ c *Conn
+ compress bool // whether next call to flushFrame should set RSV1
+ pos int // end of data in writeBuf.
+ frameType int // type of the current frame.
+ err error
+}
+
+func (w *messageWriter) endMessage(err error) error {
+ if w.err != nil {
+ return err
+ }
+ c := w.c
+ w.err = err
+ c.writer = nil
+ if c.writePool != nil {
+ c.writePool.Put(writePoolData{buf: c.writeBuf})
+ c.writeBuf = nil
+ }
+ return err
+}
+
+// flushFrame writes buffered data and extra as a frame to the network. The
+// final argument indicates that this is the last frame in the message.
+func (w *messageWriter) flushFrame(final bool, extra []byte) error {
+ c := w.c
+ length := w.pos - maxFrameHeaderSize + len(extra)
+
+ // Check for invalid control frames.
+ if isControl(w.frameType) &&
+ (!final || length > maxControlFramePayloadSize) {
+ return w.endMessage(errInvalidControlFrame)
+ }
+
+ b0 := byte(w.frameType)
+ if final {
+ b0 |= finalBit
+ }
+ if w.compress {
+ b0 |= rsv1Bit
+ }
+ w.compress = false
+
+ b1 := byte(0)
+ if !c.isServer {
+ b1 |= maskBit
+ }
+
+ // Assume that the frame starts at beginning of c.writeBuf.
+ framePos := 0
+ if c.isServer {
+ // Adjust up if mask not included in the header.
+ framePos = 4
+ }
+
+ switch {
+ case length >= 65536:
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | 127
+ binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
+ case length > 125:
+ framePos += 6
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | 126
+ binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
+ default:
+ framePos += 8
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | byte(length)
+ }
+
+ if !c.isServer {
+ key := newMaskKey()
+ copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
+ maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos])
+ if len(extra) > 0 {
+ return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode")))
+ }
+ }
+
+ // Write the buffers to the connection with best-effort detection of
+ // concurrent writes. See the concurrency section in the package
+ // documentation for more info.
+
+ if c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = true
+
+ err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra)
+
+ if !c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = false
+
+ if err != nil {
+ return w.endMessage(err)
+ }
+
+ if final {
+ w.endMessage(errWriteClosed)
+ return nil
+ }
+
+ // Setup for next frame.
+ w.pos = maxFrameHeaderSize
+ w.frameType = continuationFrame
+ return nil
+}
+
+func (w *messageWriter) ncopy(max int) (int, error) {
+ n := len(w.c.writeBuf) - w.pos
+ if n <= 0 {
+ if err := w.flushFrame(false, nil); err != nil {
+ return 0, err
+ }
+ n = len(w.c.writeBuf) - w.pos
+ }
+ if n > max {
+ n = max
+ }
+ return n, nil
+}
+
+func (w *messageWriter) Write(p []byte) (int, error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+
+ if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
+ // Don't buffer large messages.
+ err := w.flushFrame(false, p)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+ }
+
+ nn := len(p)
+ for len(p) > 0 {
+ n, err := w.ncopy(len(p))
+ if err != nil {
+ return 0, err
+ }
+ copy(w.c.writeBuf[w.pos:], p[:n])
+ w.pos += n
+ p = p[n:]
+ }
+ return nn, nil
+}
+
+func (w *messageWriter) WriteString(p string) (int, error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+
+ nn := len(p)
+ for len(p) > 0 {
+ n, err := w.ncopy(len(p))
+ if err != nil {
+ return 0, err
+ }
+ copy(w.c.writeBuf[w.pos:], p[:n])
+ w.pos += n
+ p = p[n:]
+ }
+ return nn, nil
+}
+
+func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for {
+ if w.pos == len(w.c.writeBuf) {
+ err = w.flushFrame(false, nil)
+ if err != nil {
+ break
+ }
+ }
+ var n int
+ n, err = r.Read(w.c.writeBuf[w.pos:])
+ w.pos += n
+ nn += int64(n)
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ break
+ }
+ }
+ return nn, err
+}
+
+func (w *messageWriter) Close() error {
+ if w.err != nil {
+ return w.err
+ }
+ return w.flushFrame(true, nil)
+}
+
+// WritePreparedMessage writes prepared message into connection.
+func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error {
+ frameType, frameData, err := pm.frame(prepareKey{
+ isServer: c.isServer,
+ compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType),
+ compressionLevel: c.compressionLevel,
+ })
+ if err != nil {
+ return err
+ }
+ if c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = true
+ err = c.write(frameType, c.writeDeadline, frameData, nil)
+ if !c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = false
+ return err
+}
+
+// WriteMessage is a helper method for getting a writer using NextWriter,
+// writing the message and closing the writer.
+func (c *Conn) WriteMessage(messageType int, data []byte) error {
+
+ if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {
+ // Fast path with no allocations and single frame.
+
+ var mw messageWriter
+ if err := c.beginMessage(&mw, messageType); err != nil {
+ return err
+ }
+ n := copy(c.writeBuf[mw.pos:], data)
+ mw.pos += n
+ data = data[n:]
+ return mw.flushFrame(true, data)
+ }
+
+ w, err := c.NextWriter(messageType)
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(data); err != nil {
+ return err
+ }
+ return w.Close()
+}
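A short sketch contrasting the two write paths described above: WriteMessage for a complete payload, and NextWriter plus Close when a message is assembled in pieces. The payload contents and package name are placeholders.

```go
package wsclient

import "github.com/gorilla/websocket"

// sendBatch writes one small message, then streams a larger one in chunks.
func sendBatch(conn *websocket.Conn, chunks [][]byte) error {
	// One-shot helper: frames and flushes the whole message.
	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		return err
	}

	// Streaming path: Close flushes the complete message to the network.
	w, err := conn.NextWriter(websocket.BinaryMessage)
	if err != nil {
		return err
	}
	for _, chunk := range chunks {
		if _, err := w.Write(chunk); err != nil {
			w.Close()
			return err
		}
	}
	return w.Close()
}
```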
+
+// SetWriteDeadline sets the write deadline on the underlying network
+// connection. After a write has timed out, the websocket state is corrupt and
+// all future writes will return an error. A zero value for t means writes will
+// not time out.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ c.writeDeadline = t
+ return nil
+}
+
+// Read methods
+
+func (c *Conn) advanceFrame() (int, error) {
+ // 1. Skip remainder of previous frame.
+
+ if c.readRemaining > 0 {
+ if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
+ return noFrame, err
+ }
+ }
+
+ // 2. Read and parse first two bytes of frame header.
+ // To aid debugging, collect and report all errors in the first two bytes
+ // of the header.
+
+ var errors []string
+
+ p, err := c.read(2)
+ if err != nil {
+ return noFrame, err
+ }
+
+ frameType := int(p[0] & 0xf)
+ final := p[0]&finalBit != 0
+ rsv1 := p[0]&rsv1Bit != 0
+ rsv2 := p[0]&rsv2Bit != 0
+ rsv3 := p[0]&rsv3Bit != 0
+ mask := p[1]&maskBit != 0
+ c.setReadRemaining(int64(p[1] & 0x7f))
+
+ c.readDecompress = false
+ if rsv1 {
+ if c.newDecompressionReader != nil {
+ c.readDecompress = true
+ } else {
+ errors = append(errors, "RSV1 set")
+ }
+ }
+
+ if rsv2 {
+ errors = append(errors, "RSV2 set")
+ }
+
+ if rsv3 {
+ errors = append(errors, "RSV3 set")
+ }
+
+ switch frameType {
+ case CloseMessage, PingMessage, PongMessage:
+ if c.readRemaining > maxControlFramePayloadSize {
+ errors = append(errors, "len > 125 for control")
+ }
+ if !final {
+ errors = append(errors, "FIN not set on control")
+ }
+ case TextMessage, BinaryMessage:
+ if !c.readFinal {
+ errors = append(errors, "data before FIN")
+ }
+ c.readFinal = final
+ case continuationFrame:
+ if c.readFinal {
+ errors = append(errors, "continuation after FIN")
+ }
+ c.readFinal = final
+ default:
+ errors = append(errors, "bad opcode "+strconv.Itoa(frameType))
+ }
+
+ if mask != c.isServer {
+ errors = append(errors, "bad MASK")
+ }
+
+ if len(errors) > 0 {
+ return noFrame, c.handleProtocolError(strings.Join(errors, ", "))
+ }
+
+ // 3. Read and parse frame length as per
+ // https://tools.ietf.org/html/rfc6455#section-5.2
+ //
+ // The length of the "Payload data", in bytes: if 0-125, that is the payload
+ // length.
+ // - If 126, the following 2 bytes interpreted as a 16-bit unsigned
+ // integer are the payload length.
+ // - If 127, the following 8 bytes interpreted as
+ // a 64-bit unsigned integer (the most significant bit MUST be 0) are the
+ // payload length. Multibyte length quantities are expressed in network byte
+ // order.
+
+ switch c.readRemaining {
+ case 126:
+ p, err := c.read(2)
+ if err != nil {
+ return noFrame, err
+ }
+
+ if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil {
+ return noFrame, err
+ }
+ case 127:
+ p, err := c.read(8)
+ if err != nil {
+ return noFrame, err
+ }
+
+ if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil {
+ return noFrame, err
+ }
+ }
+
+ // 4. Handle frame masking.
+
+ if mask {
+ c.readMaskPos = 0
+ p, err := c.read(len(c.readMaskKey))
+ if err != nil {
+ return noFrame, err
+ }
+ copy(c.readMaskKey[:], p)
+ }
+
+ // 5. For text and binary messages, enforce read limit and return.
+
+ if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {
+
+ c.readLength += c.readRemaining
+ // Don't allow readLength to overflow in the presence of a large readRemaining
+ // counter.
+ if c.readLength < 0 {
+ return noFrame, ErrReadLimit
+ }
+
+ if c.readLimit > 0 && c.readLength > c.readLimit {
+ c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
+ return noFrame, ErrReadLimit
+ }
+
+ return frameType, nil
+ }
+
+ // 6. Read control frame payload.
+
+ var payload []byte
+ if c.readRemaining > 0 {
+ payload, err = c.read(int(c.readRemaining))
+ c.setReadRemaining(0)
+ if err != nil {
+ return noFrame, err
+ }
+ if c.isServer {
+ maskBytes(c.readMaskKey, 0, payload)
+ }
+ }
+
+ // 7. Process control frame payload.
+
+ switch frameType {
+ case PongMessage:
+ if err := c.handlePong(string(payload)); err != nil {
+ return noFrame, err
+ }
+ case PingMessage:
+ if err := c.handlePing(string(payload)); err != nil {
+ return noFrame, err
+ }
+ case CloseMessage:
+ closeCode := CloseNoStatusReceived
+ closeText := ""
+ if len(payload) >= 2 {
+ closeCode = int(binary.BigEndian.Uint16(payload))
+ if !isValidReceivedCloseCode(closeCode) {
+ return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode))
+ }
+ closeText = string(payload[2:])
+ if !utf8.ValidString(closeText) {
+ return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
+ }
+ }
+ if err := c.handleClose(closeCode, closeText); err != nil {
+ return noFrame, err
+ }
+ return noFrame, &CloseError{Code: closeCode, Text: closeText}
+ }
+
+ return frameType, nil
+}
+
+func (c *Conn) handleProtocolError(message string) error {
+ data := FormatCloseMessage(CloseProtocolError, message)
+ if len(data) > maxControlFramePayloadSize {
+ data = data[:maxControlFramePayloadSize]
+ }
+ c.WriteControl(CloseMessage, data, time.Now().Add(writeWait))
+ return errors.New("websocket: " + message)
+}
+
+// NextReader returns the next data message received from the peer. The
+// returned messageType is either TextMessage or BinaryMessage.
+//
+// There can be at most one open reader on a connection. NextReader discards
+// the previous message if the application has not already consumed it.
+//
+// Applications must break out of the application's read loop when this method
+// returns a non-nil error value. Errors returned from this method are
+// permanent. Once this method returns a non-nil error, all subsequent calls to
+// this method return the same error.
+func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
+ // Close previous reader, only relevant for decompression.
+ if c.reader != nil {
+ c.reader.Close()
+ c.reader = nil
+ }
+
+ c.messageReader = nil
+ c.readLength = 0
+
+ for c.readErr == nil {
+ frameType, err := c.advanceFrame()
+ if err != nil {
+ c.readErr = hideTempErr(err)
+ break
+ }
+
+ if frameType == TextMessage || frameType == BinaryMessage {
+ c.messageReader = &messageReader{c}
+ c.reader = c.messageReader
+ if c.readDecompress {
+ c.reader = c.newDecompressionReader(c.reader)
+ }
+ return frameType, c.reader, nil
+ }
+ }
+
+	// Applications that do not handle the error returned from this method can
+	// spin in a tight loop on connection failure. To help application developers
+	// detect this error, panic on repeated reads of the failed connection.
+ c.readErrCount++
+ if c.readErrCount >= 1000 {
+ panic("repeated read on failed websocket connection")
+ }
+
+ return noFrame, nil, c.readErr
+}
+
+type messageReader struct{ c *Conn }
+
+func (r *messageReader) Read(b []byte) (int, error) {
+ c := r.c
+ if c.messageReader != r {
+ return 0, io.EOF
+ }
+
+ for c.readErr == nil {
+
+ if c.readRemaining > 0 {
+ if int64(len(b)) > c.readRemaining {
+ b = b[:c.readRemaining]
+ }
+ n, err := c.br.Read(b)
+ c.readErr = hideTempErr(err)
+ if c.isServer {
+ c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n])
+ }
+ rem := c.readRemaining
+ rem -= int64(n)
+ c.setReadRemaining(rem)
+ if c.readRemaining > 0 && c.readErr == io.EOF {
+ c.readErr = errUnexpectedEOF
+ }
+ return n, c.readErr
+ }
+
+ if c.readFinal {
+ c.messageReader = nil
+ return 0, io.EOF
+ }
+
+ frameType, err := c.advanceFrame()
+ switch {
+ case err != nil:
+ c.readErr = hideTempErr(err)
+ case frameType == TextMessage || frameType == BinaryMessage:
+ c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
+ }
+ }
+
+ err := c.readErr
+ if err == io.EOF && c.messageReader == r {
+ err = errUnexpectedEOF
+ }
+ return 0, err
+}
+
+func (r *messageReader) Close() error {
+ return nil
+}
+
+// ReadMessage is a helper method for getting a reader using NextReader and
+// reading from that reader to a buffer.
+func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
+ var r io.Reader
+ messageType, r, err = c.NextReader()
+ if err != nil {
+ return messageType, nil, err
+ }
+ p, err = ioutil.ReadAll(r)
+ return messageType, p, err
+}
+
+// SetReadDeadline sets the read deadline on the underlying network connection.
+// After a read has timed out, the websocket connection state is corrupt and
+// all future reads will return an error. A zero value for t means reads will
+// not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ return c.conn.SetReadDeadline(t)
+}
+
+// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a
+// message exceeds the limit, the connection sends a close message to the peer
+// and returns ErrReadLimit to the application.
+func (c *Conn) SetReadLimit(limit int64) {
+ c.readLimit = limit
+}
+
+// CloseHandler returns the current close handler.
+func (c *Conn) CloseHandler() func(code int, text string) error {
+ return c.handleClose
+}
+
+// SetCloseHandler sets the handler for close messages received from the peer.
+// The code argument to h is the received close code or CloseNoStatusReceived
+// if the close message is empty. The default close handler sends a close
+// message back to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// close messages as described in the section on Control Messages above.
+//
+// The connection read methods return a CloseError when a close message is
+// received. Most applications should handle close messages as part of their
+// normal error handling. Applications should only set a close handler when the
+// application must perform some action before sending a close message back to
+// the peer.
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
+ if h == nil {
+ h = func(code int, text string) error {
+ message := FormatCloseMessage(code, "")
+ c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
+ return nil
+ }
+ }
+ c.handleClose = h
+}
+
+// PingHandler returns the current ping handler.
+func (c *Conn) PingHandler() func(appData string) error {
+ return c.handlePing
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING message application data. The default
+// ping handler sends a pong to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// ping messages as described in the section on Control Messages above.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+ if h == nil {
+ h = func(message string) error {
+ err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+ if err == ErrCloseSent {
+ return nil
+ } else if e, ok := err.(net.Error); ok && e.Temporary() {
+ return nil
+ }
+ return err
+ }
+ }
+ c.handlePing = h
+}
+
+// PongHandler returns the current pong handler.
+func (c *Conn) PongHandler() func(appData string) error {
+ return c.handlePong
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG message application data. The default
+// pong handler does nothing.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// pong messages as described in the section on Control Messages above.
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+ if h == nil {
+ h = func(string) error { return nil }
+ }
+ c.handlePong = h
+}
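+
+// For illustration, a minimal keepalive sketch: a periodic ping paired with a
+// pong handler that extends the read deadline. The conn variable is assumed to
+// be an established *Conn and the intervals are arbitrary example values.
+//
+//	const pongWait = 60 * time.Second
+//	conn.SetReadDeadline(time.Now().Add(pongWait))
+//	conn.SetPongHandler(func(string) error {
+//		return conn.SetReadDeadline(time.Now().Add(pongWait))
+//	})
+//	go func() {
+//		ticker := time.NewTicker(pongWait * 9 / 10)
+//		defer ticker.Stop()
+//		for range ticker.C {
+//			if err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(5*time.Second)); err != nil {
+//				return
+//			}
+//		}
+//	}()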
+
+// UnderlyingConn returns the internal net.Conn. This can be used to apply
+// further modifications to connection-specific flags.
+func (c *Conn) UnderlyingConn() net.Conn {
+ return c.conn
+}
+
+// EnableWriteCompression enables and disables write compression of
+// subsequent text and binary messages. This function is a noop if
+// compression was not negotiated with the peer.
+func (c *Conn) EnableWriteCompression(enable bool) {
+ c.enableWriteCompression = enable
+}
+
+// SetCompressionLevel sets the flate compression level for subsequent text and
+// binary messages. This function is a noop if compression was not negotiated
+// with the peer. See the compress/flate package for a description of
+// compression levels.
+func (c *Conn) SetCompressionLevel(level int) error {
+ if !isValidCompressionLevel(level) {
+ return errors.New("websocket: invalid compression level")
+ }
+ c.compressionLevel = level
+ return nil
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+// An empty message is returned for code CloseNoStatusReceived.
+func FormatCloseMessage(closeCode int, text string) []byte {
+ if closeCode == CloseNoStatusReceived {
+		// Return an empty message because it's illegal to send
+		// CloseNoStatusReceived. Return a non-nil value in case the
+		// application checks for nil.
+ return []byte{}
+ }
+ buf := make([]byte, 2+len(text))
+ binary.BigEndian.PutUint16(buf, uint16(closeCode))
+ copy(buf[2:], text)
+ return buf
+}
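+
+// For illustration, a minimal sketch of a clean shutdown from the application
+// side: send a close message built with FormatCloseMessage, then close the
+// connection. The one-second deadline is an arbitrary example value.
+//
+//	deadline := time.Now().Add(time.Second)
+//	msg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "bye")
+//	if err := conn.WriteControl(websocket.CloseMessage, msg, deadline); err != nil {
+//		log.Println("write close:", err)
+//	}
+//	conn.Close()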
diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go
new file mode 100644
index 0000000..8db0cef
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/doc.go
@@ -0,0 +1,227 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application calls
+// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
+//
+// var upgrader = websocket.Upgrader{
+// ReadBufferSize: 1024,
+// WriteBufferSize: 1024,
+// }
+//
+// func handler(w http.ResponseWriter, r *http.Request) {
+// conn, err := upgrader.Upgrade(w, r, nil)
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// ... Use conn to send and receive messages.
+// }
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet of code shows how to echo
+// messages using these methods:
+//
+// for {
+// messageType, p, err := conn.ReadMessage()
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// if err := conn.WriteMessage(messageType, p); err != nil {
+// log.Println(err)
+// return
+// }
+// }
+//
+// In the snippet above, p is a []byte and messageType is an int with value
+// websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+// for {
+// messageType, r, err := conn.NextReader()
+// if err != nil {
+// return
+// }
+// w, err := conn.NextWriter(messageType)
+// if err != nil {
+// return err
+// }
+// if _, err := io.Copy(w, r); err != nil {
+// return err
+// }
+// if err := w.Close(); err != nil {
+// return err
+// }
+// }
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received close messages by calling the handler function
+// set with the SetCloseHandler method and by returning a *CloseError from the
+// NextReader, ReadMessage or the message Read method. The default close
+// handler sends a close message to the peer.
+//
+// Connections handle received ping messages by calling the handler function
+// set with the SetPingHandler method. The default ping handler sends a pong
+// message to the peer.
+//
+// Connections handle received pong messages by calling the handler function
+// set with the SetPongHandler method. The default pong handler does nothing.
+// If an application sends ping messages, then the application should set a
+// pong handler to receive the corresponding pong.
+//
+// The control message handler functions are called from the NextReader,
+// ReadMessage and message reader Read methods. The default close and ping
+// handlers can block these methods for a short time when the handler writes to
+// the connection.
+//
+// The application must read the connection to process close, ping and pong
+// messages sent from the peer. If the application is not otherwise interested
+// in messages from the peer, then the application should start a goroutine to
+// read and discard messages from the peer. A simple example is:
+//
+// func readLoop(c *websocket.Conn) {
+// for {
+// if _, _, err := c.NextReader(); err != nil {
+// c.Close()
+// break
+// }
+// }
+// }
+//
+// Concurrency
+//
+// Connections support one concurrent reader and one concurrent writer.
+//
+// Applications are responsible for ensuring that no more than one goroutine
+// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
+// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
+// that no more than one goroutine calls the read methods (NextReader,
+// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
+// concurrently.
+//
+// The Close and WriteControl methods can be called concurrently with all other
+// methods.
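+//
+// A minimal sketch of that pattern, assuming conn is an established *Conn and
+// using an illustrative buffered channel named send to funnel all writes
+// through a single goroutine:
+//
+//	send := make(chan []byte, 16)
+//	go func() {
+//		for msg := range send {
+//			if err := conn.WriteMessage(websocket.TextMessage, msg); err != nil {
+//				return
+//			}
+//		}
+//	}()
+//	for {
+//		_, msg, err := conn.ReadMessage()
+//		if err != nil {
+//			break
+//		}
+//		send <- msg
+//	}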
+//
+// Origin Considerations
+//
+// Web browsers allow JavaScript applications to open a WebSocket connection to
+// any host. It's up to the server to enforce an origin policy using the Origin
+// request header sent by the browser.
+//
+// The Upgrader calls the function specified in the CheckOrigin field to check
+// the origin. If the CheckOrigin function returns false, then the Upgrade
+// method fails the WebSocket handshake with HTTP status 403.
+//
+// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
+// the handshake if the Origin request header is present and the Origin host is
+// not equal to the Host request header.
+//
+// The deprecated package-level Upgrade function does not perform origin
+// checking. The application is responsible for checking the Origin header
+// before calling the Upgrade function.
+//
+// Buffers
+//
+// Connections buffer network input and output to reduce the number
+// of system calls when reading or writing messages.
+//
+// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
+// Section 5 for a discussion of message framing. A WebSocket frame header is
+// written to the network each time a write buffer is flushed to the network.
+// Decreasing the size of the write buffer can increase the amount of framing
+// overhead on the connection.
+//
+// The buffer sizes in bytes are specified by the ReadBufferSize and
+// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
+// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
+// buffers created by the HTTP server when a buffer size field is set to zero.
+// The HTTP server buffers have a size of 4096 at the time of this writing.
+//
+// The buffer sizes do not limit the size of a message that can be read or
+// written by a connection.
+//
+// Buffers are held for the lifetime of the connection by default. If the
+// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
+// write buffer only when writing a message.
+//
+// Applications should tune the buffer sizes to balance memory use and
+// performance. Increasing the buffer size uses more memory, but can reduce the
+// number of system calls to read or write the network. In the case of writing,
+// increasing the buffer size can reduce the number of frame headers written to
+// the network.
+//
+// Some guidelines for setting buffer parameters are:
+//
+// Limit the buffer sizes to the maximum expected message size. Buffers larger
+// than the largest message do not provide any benefit.
+//
+// Depending on the distribution of message sizes, setting the buffer size to
+// a value less than the maximum expected message size can greatly reduce memory
+// use with a small impact on performance. Here's an example: If 99% of the
+// messages are smaller than 256 bytes and the maximum message size is 512
+// bytes, then a buffer size of 256 bytes results in roughly 1.01 times as many
+// system calls as a buffer size of 512 bytes, while halving memory use.
+//
+// A write buffer pool is useful when the application has a modest number of
+// writes over a large number of connections. When buffers are pooled, a larger
+// buffer size has a reduced impact on total memory use and has the benefit of
+// reducing system calls and frame overhead.
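+//
+// For illustration, a sketch of buffer tuning with arbitrary example sizes; a
+// *sync.Pool can serve as the BufferPool because it satisfies the interface:
+//
+//	var upgrader = websocket.Upgrader{
+//		ReadBufferSize:  512,
+//		WriteBufferSize: 512,
+//		WriteBufferPool: &sync.Pool{},
+//	}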
+//
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. Setting the EnableCompression option
+// to true in Dialer or Upgrader will attempt to negotiate per message deflate
+// support.
+//
+// var upgrader = websocket.Upgrader{
+// EnableCompression: true,
+// }
+//
+// If compression was successfully negotiated with the connection's peer, any
+// message received in compressed form will be automatically decompressed.
+// All Read methods will return uncompressed bytes.
+//
+// Per message compression of messages written to a connection can be enabled
+// or disabled by calling the corresponding Conn method:
+//
+// conn.EnableWriteCompression(false)
+//
+// Currently this package does not support compression with "context takeover".
+// This means that messages must be compressed and decompressed in isolation,
+// without retaining sliding window or dictionary state across messages. For
+// more details refer to RFC 7692.
+//
+// Use of compression is experimental and may result in decreased performance.
+package websocket
diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go
new file mode 100644
index 0000000..c64f8c8
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/join.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "io"
+ "strings"
+)
+
+// JoinMessages concatenates received messages to create a single io.Reader.
+// The string term is appended to each message. The returned reader does not
+// support concurrent calls to the Read method.
+func JoinMessages(c *Conn, term string) io.Reader {
+ return &joinReader{c: c, term: term}
+}
+
+type joinReader struct {
+ c *Conn
+ term string
+ r io.Reader
+}
+
+func (r *joinReader) Read(p []byte) (int, error) {
+ if r.r == nil {
+ var err error
+ _, r.r, err = r.c.NextReader()
+ if err != nil {
+ return 0, err
+ }
+ if r.term != "" {
+ r.r = io.MultiReader(r.r, strings.NewReader(r.term))
+ }
+ }
+ n, err := r.r.Read(p)
+ if err == io.EOF {
+ err = nil
+ r.r = nil
+ }
+ return n, err
+}
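+
+// For illustration, a minimal sketch that pairs JoinMessages with
+// bufio.Scanner, assuming conn is an established *Conn and each message is a
+// single line:
+//
+//	r := websocket.JoinMessages(conn, "\n")
+//	sc := bufio.NewScanner(r)
+//	for sc.Scan() {
+//		log.Println("message:", sc.Text())
+//	}
+//	if err := sc.Err(); err != nil {
+//		log.Println("read:", err)
+//	}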
diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go
new file mode 100644
index 0000000..dc2c1f6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/json.go
@@ -0,0 +1,60 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// Deprecated: Use c.WriteJSON instead.
+func WriteJSON(c *Conn, v interface{}) error {
+ return c.WriteJSON(v)
+}
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// See the documentation for encoding/json Marshal for details about the
+// conversion of Go values to JSON.
+func (c *Conn) WriteJSON(v interface{}) error {
+ w, err := c.NextWriter(TextMessage)
+ if err != nil {
+ return err
+ }
+ err1 := json.NewEncoder(w).Encode(v)
+ err2 := w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// Deprecated: Use c.ReadJSON instead.
+func ReadJSON(c *Conn, v interface{}) error {
+ return c.ReadJSON(v)
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// See the documentation for the encoding/json Unmarshal function for details
+// about the conversion of JSON to a Go value.
+func (c *Conn) ReadJSON(v interface{}) error {
+ _, r, err := c.NextReader()
+ if err != nil {
+ return err
+ }
+ err = json.NewDecoder(r).Decode(v)
+ if err == io.EOF {
+ // One value is expected in the message.
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+}
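+
+// For illustration, a minimal sketch of the JSON helpers, assuming conn is an
+// established *Conn; the Event type is an example, not part of this package:
+//
+//	type Event struct {
+//		Name string `json:"name"`
+//		Data string `json:"data"`
+//	}
+//
+//	if err := conn.WriteJSON(Event{Name: "ping"}); err != nil {
+//		log.Println("write:", err)
+//	}
+//	var ev Event
+//	if err := conn.ReadJSON(&ev); err != nil {
+//		log.Println("read:", err)
+//	}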
diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go
new file mode 100644
index 0000000..d0742bf
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask.go
@@ -0,0 +1,55 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+//go:build !appengine
+// +build !appengine
+
+package websocket
+
+import "unsafe"
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ // Mask one byte at a time for small buffers.
+ if len(b) < 2*wordSize {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+ }
+
+ // Mask one byte at a time to word boundary.
+ if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
+ n = wordSize - n
+ for i := range b[:n] {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ b = b[n:]
+ }
+
+ // Create aligned word size key.
+ var k [wordSize]byte
+ for i := range k {
+ k[i] = key[(pos+i)&3]
+ }
+ kw := *(*uintptr)(unsafe.Pointer(&k))
+
+ // Mask one word at a time.
+ n := (len(b) / wordSize) * wordSize
+ for i := 0; i < n; i += wordSize {
+ *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
+ }
+
+ // Mask one byte at a time for remaining bytes.
+ b = b[n:]
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+
+ return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go
new file mode 100644
index 0000000..36250ca
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask_safe.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+//go:build appengine
+// +build appengine
+
+package websocket
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go
new file mode 100644
index 0000000..c854225
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/prepared.go
@@ -0,0 +1,102 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "net"
+ "sync"
+ "time"
+)
+
+// PreparedMessage caches the on-the-wire representations of a message payload.
+// Use PreparedMessage to efficiently send a message payload to multiple
+// connections. PreparedMessage is especially useful when compression is used
+// because the CPU and memory expensive compression operation can be executed
+// once for a given set of compression options.
+type PreparedMessage struct {
+ messageType int
+ data []byte
+ mu sync.Mutex
+ frames map[prepareKey]*preparedFrame
+}
+
+// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
+type prepareKey struct {
+ isServer bool
+ compress bool
+ compressionLevel int
+}
+
+// preparedFrame contains data in wire representation.
+type preparedFrame struct {
+ once sync.Once
+ data []byte
+}
+
+// NewPreparedMessage returns an initialized PreparedMessage. You can then send
+// it to a connection using the WritePreparedMessage method. The wire
+// representation is calculated lazily, only once per set of connection
+// options.
+func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
+ pm := &PreparedMessage{
+ messageType: messageType,
+ frames: make(map[prepareKey]*preparedFrame),
+ data: data,
+ }
+
+ // Prepare a plain server frame.
+ _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
+ if err != nil {
+ return nil, err
+ }
+
+	// To protect against the caller modifying the data argument, remember the
+	// data copied to the plain server frame.
+ pm.data = frameData[len(frameData)-len(data):]
+ return pm, nil
+}
+
+func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
+ pm.mu.Lock()
+ frame, ok := pm.frames[key]
+ if !ok {
+ frame = &preparedFrame{}
+ pm.frames[key] = frame
+ }
+ pm.mu.Unlock()
+
+ var err error
+ frame.once.Do(func() {
+ // Prepare a frame using a 'fake' connection.
+ // TODO: Refactor code in conn.go to allow more direct construction of
+ // the frame.
+ mu := make(chan struct{}, 1)
+ mu <- struct{}{}
+ var nc prepareConn
+ c := &Conn{
+ conn: &nc,
+ mu: mu,
+ isServer: key.isServer,
+ compressionLevel: key.compressionLevel,
+ enableWriteCompression: true,
+ writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+ }
+ if key.compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ }
+ err = c.WriteMessage(pm.messageType, pm.data)
+ frame.data = nc.buf.Bytes()
+ })
+ return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+ buf bytes.Buffer
+ net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
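+
+// For illustration, a minimal broadcast sketch: prepare the payload once and
+// send it to every connection with Conn.WritePreparedMessage. The conns slice
+// stands in for an application-specific connection registry.
+//
+//	pm, err := websocket.NewPreparedMessage(websocket.TextMessage, []byte("hello"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, c := range conns {
+//		if err := c.WritePreparedMessage(pm); err != nil {
+//			log.Println("send:", err)
+//		}
+//	}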
diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go
new file mode 100644
index 0000000..e0f466b
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/proxy.go
@@ -0,0 +1,77 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/base64"
+ "errors"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+type netDialerFunc func(network, addr string) (net.Conn, error)
+
+func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
+ return fn(network, addr)
+}
+
+func init() {
+ proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
+ return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
+ })
+}
+
+type httpProxyDialer struct {
+ proxyURL *url.URL
+ forwardDial func(network, addr string) (net.Conn, error)
+}
+
+func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
+ hostPort, _ := hostPortNoPort(hpd.proxyURL)
+ conn, err := hpd.forwardDial(network, hostPort)
+ if err != nil {
+ return nil, err
+ }
+
+ connectHeader := make(http.Header)
+ if user := hpd.proxyURL.User; user != nil {
+ proxyUser := user.Username()
+ if proxyPassword, passwordSet := user.Password(); passwordSet {
+ credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+ connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+ }
+ }
+
+ connectReq := &http.Request{
+ Method: http.MethodConnect,
+ URL: &url.URL{Opaque: addr},
+ Host: addr,
+ Header: connectHeader,
+ }
+
+ if err := connectReq.Write(conn); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+	// Read response. It's OK to use and discard the buffered reader here because
+ // the remote server does not speak until spoken to.
+ br := bufio.NewReader(conn)
+ resp, err := http.ReadResponse(br, connectReq)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ if resp.StatusCode != 200 {
+ conn.Close()
+ f := strings.SplitN(resp.Status, " ", 2)
+ return nil, errors.New(f[1])
+ }
+ return conn, nil
+}
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
new file mode 100644
index 0000000..24d53b3
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -0,0 +1,365 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
+type HandshakeError struct {
+ message string
+}
+
+func (e HandshakeError) Error() string { return e.message }
+
+// Upgrader specifies parameters for upgrading an HTTP connection to a
+// WebSocket connection.
+//
+// It is safe to call Upgrader's methods concurrently.
+type Upgrader struct {
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
+ // size is zero, then buffers allocated by the HTTP server are used. The
+ // I/O buffer sizes do not limit the size of the messages that can be sent
+ // or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
+ // Subprotocols specifies the server's supported protocols in order of
+ // preference. If this field is not nil, then the Upgrade method negotiates a
+ // subprotocol by selecting the first match in this list with a protocol
+ // requested by the client. If there's no match, then no protocol is
+ // negotiated (the Sec-Websocket-Protocol header is not included in the
+ // handshake response).
+ Subprotocols []string
+
+ // Error specifies the function for generating HTTP error responses. If Error
+ // is nil, then http.Error is used to generate the HTTP response.
+ Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
+
+ // CheckOrigin returns true if the request Origin header is acceptable. If
+ // CheckOrigin is nil, then a safe default is used: return false if the
+ // Origin request header is present and the origin host is not equal to
+ // request Host header.
+ //
+ // A CheckOrigin function should carefully validate the request origin to
+ // prevent cross-site request forgery.
+ CheckOrigin func(r *http.Request) bool
+
+	// EnableCompression specifies whether the server should attempt to negotiate per
+ // message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+}
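+
+// For illustration, a minimal sketch of an Upgrader that accepts only a known
+// origin; the allowed host is an arbitrary example value:
+//
+//	var upgrader = websocket.Upgrader{
+//		CheckOrigin: func(r *http.Request) bool {
+//			origin := r.Header.Get("Origin")
+//			return origin == "" || origin == "https://example.com"
+//		},
+//	}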
+
+func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
+ err := HandshakeError{reason}
+ if u.Error != nil {
+ u.Error(w, r, status, err)
+ } else {
+ w.Header().Set("Sec-Websocket-Version", "13")
+ http.Error(w, http.StatusText(status), status)
+ }
+ return nil, err
+}
+
+// checkSameOrigin returns true if the origin is not set or is equal to the request host.
+func checkSameOrigin(r *http.Request) bool {
+ origin := r.Header["Origin"]
+ if len(origin) == 0 {
+ return true
+ }
+ u, err := url.Parse(origin[0])
+ if err != nil {
+ return false
+ }
+ return equalASCIIFold(u.Host, r.Host)
+}
+
+func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
+ if u.Subprotocols != nil {
+ clientProtocols := Subprotocols(r)
+ for _, serverProtocol := range u.Subprotocols {
+ for _, clientProtocol := range clientProtocols {
+ if clientProtocol == serverProtocol {
+ return clientProtocol
+ }
+ }
+ }
+ } else if responseHeader != nil {
+ return responseHeader.Get("Sec-Websocket-Protocol")
+ }
+ return ""
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie). To specify
+// subprotocols supported by the server, set Upgrader.Subprotocols directly.
+//
+// If the upgrade fails, then Upgrade replies to the client with an HTTP error
+// response.
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
+ const badHandshake = "websocket: the client is not using the websocket protocol: "
+
+ if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
+ }
+
+ if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
+ }
+
+ if r.Method != http.MethodGet {
+ return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
+ }
+
+ if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
+ }
+
+ if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
+ }
+
+ checkOrigin := u.CheckOrigin
+ if checkOrigin == nil {
+ checkOrigin = checkSameOrigin
+ }
+ if !checkOrigin(r) {
+ return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
+ }
+
+ challengeKey := r.Header.Get("Sec-Websocket-Key")
+ if challengeKey == "" {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
+ }
+
+ subprotocol := u.selectSubprotocol(r, responseHeader)
+
+ // Negotiate PMCE
+ var compress bool
+ if u.EnableCompression {
+ for _, ext := range parseExtensions(r.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ compress = true
+ break
+ }
+ }
+
+ h, ok := w.(http.Hijacker)
+ if !ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
+ }
+ var brw *bufio.ReadWriter
+ netConn, brw, err := h.Hijack()
+ if err != nil {
+ return u.returnError(w, r, http.StatusInternalServerError, err.Error())
+ }
+
+ if brw.Reader.Buffered() > 0 {
+ netConn.Close()
+ return nil, errors.New("websocket: client sent data before handshake is complete")
+ }
+
+ var br *bufio.Reader
+ if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
+ // Reuse hijacked buffered reader as connection reader.
+ br = brw.Reader
+ }
+
+ buf := bufioWriterBuffer(netConn, brw.Writer)
+
+ var writeBuf []byte
+ if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
+ // Reuse hijacked write buffer as connection buffer.
+ writeBuf = buf
+ }
+
+ c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
+ c.subprotocol = subprotocol
+
+ if compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ c.newDecompressionReader = decompressNoContextTakeover
+ }
+
+ // Use larger of hijacked buffer and connection write buffer for header.
+ p := buf
+ if len(c.writeBuf) > len(p) {
+ p = c.writeBuf
+ }
+ p = p[:0]
+
+ p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
+ p = append(p, computeAcceptKey(challengeKey)...)
+ p = append(p, "\r\n"...)
+ if c.subprotocol != "" {
+ p = append(p, "Sec-WebSocket-Protocol: "...)
+ p = append(p, c.subprotocol...)
+ p = append(p, "\r\n"...)
+ }
+ if compress {
+ p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
+ }
+ for k, vs := range responseHeader {
+ if k == "Sec-Websocket-Protocol" {
+ continue
+ }
+ for _, v := range vs {
+ p = append(p, k...)
+ p = append(p, ": "...)
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if b <= 31 {
+ // prevent response splitting.
+ b = ' '
+ }
+ p = append(p, b)
+ }
+ p = append(p, "\r\n"...)
+ }
+ }
+ p = append(p, "\r\n"...)
+
+ // Clear deadlines set by HTTP server.
+ netConn.SetDeadline(time.Time{})
+
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
+ }
+ if _, err = netConn.Write(p); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Time{})
+ }
+
+ return c, nil
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// Deprecated: Use websocket.Upgrader instead.
+//
+// Upgrade does not perform origin checking. The application is responsible for
+// checking the Origin header before calling Upgrade. An example implementation
+// of the same origin policy check is:
+//
+// if req.Header.Get("Origin") != "http://"+req.Host {
+// http.Error(w, "Origin not allowed", http.StatusForbidden)
+// return
+// }
+//
+// If the endpoint supports subprotocols, then the application is responsible
+// for negotiating the protocol used on the connection. Use the Subprotocols()
+// function to get the subprotocols requested by the client. Use the
+// Sec-Websocket-Protocol response header to specify the subprotocol selected
+// by the application.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// negotiated subprotocol (Sec-Websocket-Protocol).
+//
+// The connection buffers IO to the underlying network connection. The
+// readBufSize and writeBufSize parameters specify the size of the buffers to
+// use. Messages can be larger than the buffers.
+//
+// If the request is not a valid WebSocket handshake, then Upgrade returns an
+// error of type HandshakeError. Applications should handle this error by
+// replying to the client with an HTTP error response.
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+ u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+ u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+ // don't return errors to maintain backwards compatibility
+ }
+ u.CheckOrigin = func(r *http.Request) bool {
+ // allow all connections by default
+ return true
+ }
+ return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
+func Subprotocols(r *http.Request) []string {
+ h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+ if h == "" {
+ return nil
+ }
+ protocols := strings.Split(h, ",")
+ for i := range protocols {
+ protocols[i] = strings.TrimSpace(protocols[i])
+ }
+ return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+ return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+ tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
+
+// bufioReaderSize returns the size of a bufio.Reader.
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
+ // This code assumes that peek on a reset reader returns
+ // bufio.Reader.buf[:0].
+ // TODO: Use bufio.Reader.Size() after Go 1.10
+ br.Reset(originalReader)
+ if p, err := br.Peek(0); err == nil {
+ return cap(p)
+ }
+ return 0
+}
+
+// writeHook is an io.Writer that records the last slice passed to it via
+// io.Writer.Write.
+type writeHook struct {
+ p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+ wh.p = p
+ return len(p), nil
+}
+
+// bufioWriterBuffer grabs the buffer from a bufio.Writer.
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
+ // This code assumes that bufio.Writer.buf[:1] is passed to the
+ // bufio.Writer's underlying writer.
+ var wh writeHook
+ bw.Reset(&wh)
+ bw.WriteByte(0)
+ bw.Flush()
+
+ bw.Reset(originalWriter)
+
+ return wh.p[:cap(wh.p)]
+}
diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go
new file mode 100644
index 0000000..a62b68c
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/tls_handshake.go
@@ -0,0 +1,21 @@
+//go:build go1.17
+// +build go1.17
+
+package websocket
+
+import (
+ "context"
+ "crypto/tls"
+)
+
+func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error {
+ if err := tlsConn.HandshakeContext(ctx); err != nil {
+ return err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go
new file mode 100644
index 0000000..e1b2b44
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/tls_handshake_116.go
@@ -0,0 +1,21 @@
+//go:build !go1.17
+// +build !go1.17
+
+package websocket
+
+import (
+ "context"
+ "crypto/tls"
+)
+
+func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error {
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go
new file mode 100644
index 0000000..7bf2f66
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/util.go
@@ -0,0 +1,283 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "io"
+ "net/http"
+ "strings"
+ "unicode/utf8"
+)
+
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+func computeAcceptKey(challengeKey string) string {
+ h := sha1.New()
+ h.Write([]byte(challengeKey))
+ h.Write(keyGUID)
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func generateChallengeKey() (string, error) {
+ p := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, p); err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(p), nil
+}
+
+// Token octets per RFC 2616.
+var isTokenOctet = [256]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+	'V': true,
+	'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+// skipSpace returns a slice of the string s with all leading RFC 2616 linear
+// whitespace removed.
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if b := s[i]; b != ' ' && b != '\t' {
+ break
+ }
+ }
+ return s[i:]
+}
+
+// nextToken returns the leading RFC 2616 token of s and the string following
+// the token.
+func nextToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if !isTokenOctet[s[i]] {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
+// and the string following the token or quoted string.
+func nextTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return nextToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
+
+// equalASCIIFold returns true if s is equal to t with ASCII case folding as
+// defined in RFC 4790.
+func equalASCIIFold(s, t string) bool {
+ for s != "" && t != "" {
+ sr, size := utf8.DecodeRuneInString(s)
+ s = s[size:]
+ tr, size := utf8.DecodeRuneInString(t)
+ t = t[size:]
+ if sr == tr {
+ continue
+ }
+ if 'A' <= sr && sr <= 'Z' {
+ sr = sr + 'a' - 'A'
+ }
+ if 'A' <= tr && tr <= 'Z' {
+ tr = tr + 'a' - 'A'
+ }
+ if sr != tr {
+ return false
+ }
+ }
+ return s == t
+}
+
+// tokenListContainsValue returns true if the 1#token header with the given
+// name contains a token equal to value with ASCII case folding.
+func tokenListContainsValue(header http.Header, name string, value string) bool {
+headers:
+ for _, s := range header[name] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ if equalASCIIFold(t, value) {
+ return true
+ }
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return false
+}
+
+// parseExtensions parses WebSocket extensions from a header.
+func parseExtensions(header http.Header) []map[string]string {
+ // From RFC 6455:
+ //
+ // Sec-WebSocket-Extensions = extension-list
+ // extension-list = 1#extension
+ // extension = extension-token *( ";" extension-param )
+ // extension-token = registered-token
+ // registered-token = token
+ // extension-param = token [ "=" (token | quoted-string) ]
+ // ;When using the quoted-string syntax variant, the value
+ // ;after quoted-string unescaping MUST conform to the
+ // ;'token' ABNF.
+
+ var result []map[string]string
+headers:
+ for _, s := range header["Sec-Websocket-Extensions"] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ ext := map[string]string{"": t}
+ for {
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ";") {
+ break
+ }
+ var k string
+ k, s = nextToken(skipSpace(s[1:]))
+ if k == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ var v string
+ if strings.HasPrefix(s, "=") {
+ v, s = nextTokenOrQuoted(skipSpace(s[1:]))
+ s = skipSpace(s)
+ }
+ if s != "" && s[0] != ',' && s[0] != ';' {
+ continue headers
+ }
+ ext[k] = v
+ }
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ result = append(result, ext)
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go
new file mode 100644
index 0000000..2e668f6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go
@@ -0,0 +1,473 @@
+// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
+//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+//
+
+package websocket
+
+import (
+ "errors"
+ "io"
+ "net"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type proxy_direct struct{}
+
+// Direct is a direct proxy: one that makes network connections directly.
+var proxy_Direct = proxy_direct{}
+
+func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
+ return net.Dial(network, addr)
+}
+
+// A PerHost directs connections to a default Dialer unless the host name
+// requested matches one of a number of exceptions.
+type proxy_PerHost struct {
+ def, bypass proxy_Dialer
+
+ bypassNetworks []*net.IPNet
+ bypassIPs []net.IP
+ bypassZones []string
+ bypassHosts []string
+}
+
+// NewPerHost returns a PerHost Dialer that directs connections to either
+// defaultDialer or bypass, depending on whether the connection matches one of
+// the configured rules.
+func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
+ return &proxy_PerHost{
+ def: defaultDialer,
+ bypass: bypass,
+ }
+}
+
+// Dial connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.dialerForRequest(host).Dial(network, addr)
+}
+
+func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
+ if ip := net.ParseIP(host); ip != nil {
+ for _, net := range p.bypassNetworks {
+ if net.Contains(ip) {
+ return p.bypass
+ }
+ }
+ for _, bypassIP := range p.bypassIPs {
+ if bypassIP.Equal(ip) {
+ return p.bypass
+ }
+ }
+ return p.def
+ }
+
+ for _, zone := range p.bypassZones {
+ if strings.HasSuffix(host, zone) {
+ return p.bypass
+ }
+ if host == zone[1:] {
+ // For a zone ".example.com", we match "example.com"
+ // too.
+ return p.bypass
+ }
+ }
+ for _, bypassHost := range p.bypassHosts {
+ if bypassHost == host {
+ return p.bypass
+ }
+ }
+ return p.def
+}
+
+// AddFromString parses a string that contains comma-separated values
+// specifying hosts that should use the bypass proxy. Each value is either an
+// IP address, a CIDR range, a zone (*.example.com) or a host name
+// (localhost). A best effort is made to parse the string and errors are
+// ignored.
+func (p *proxy_PerHost) AddFromString(s string) {
+ hosts := strings.Split(s, ",")
+ for _, host := range hosts {
+ host = strings.TrimSpace(host)
+ if len(host) == 0 {
+ continue
+ }
+ if strings.Contains(host, "/") {
+ // We assume that it's a CIDR address like 127.0.0.0/8
+ if _, net, err := net.ParseCIDR(host); err == nil {
+ p.AddNetwork(net)
+ }
+ continue
+ }
+ if ip := net.ParseIP(host); ip != nil {
+ p.AddIP(ip)
+ continue
+ }
+ if strings.HasPrefix(host, "*.") {
+ p.AddZone(host[1:])
+ continue
+ }
+ p.AddHost(host)
+ }
+}
+
+// AddIP specifies an IP address that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match an IP.
+func (p *proxy_PerHost) AddIP(ip net.IP) {
+ p.bypassIPs = append(p.bypassIPs, ip)
+}
+
+// AddNetwork specifies an IP range that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match.
+func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
+ p.bypassNetworks = append(p.bypassNetworks, net)
+}
+
+// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
+// "example.com" matches "example.com" and all of its subdomains.
+func (p *proxy_PerHost) AddZone(zone string) {
+ if strings.HasSuffix(zone, ".") {
+ zone = zone[:len(zone)-1]
+ }
+ if !strings.HasPrefix(zone, ".") {
+ zone = "." + zone
+ }
+ p.bypassZones = append(p.bypassZones, zone)
+}
+
+// AddHost specifies a host name that will use the bypass proxy.
+func (p *proxy_PerHost) AddHost(host string) {
+ if strings.HasSuffix(host, ".") {
+ host = host[:len(host)-1]
+ }
+ p.bypassHosts = append(p.bypassHosts, host)
+}
+
+// A Dialer is a means to establish a connection.
+type proxy_Dialer interface {
+ // Dial connects to the given address via the proxy.
+ Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type proxy_Auth struct {
+ User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy related variables in
+// the environment.
+func proxy_FromEnvironment() proxy_Dialer {
+ allProxy := proxy_allProxyEnv.Get()
+ if len(allProxy) == 0 {
+ return proxy_Direct
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return proxy_Direct
+ }
+ proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
+ if err != nil {
+ return proxy_Direct
+ }
+
+ noProxy := proxy_noProxyEnv.Get()
+ if len(noProxy) == 0 {
+ return proxy
+ }
+
+ perHost := proxy_NewPerHost(proxy, proxy_Direct)
+ perHost.AddFromString(noProxy)
+ return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
+ if proxy_proxySchemes == nil {
+ proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
+ }
+ proxy_proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
+ var auth *proxy_Auth
+ if u.User != nil {
+ auth = new(proxy_Auth)
+ auth.User = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ auth.Password = p
+ }
+ }
+
+ switch u.Scheme {
+ case "socks5":
+ return proxy_SOCKS5("tcp", u.Host, auth, forward)
+ }
+
+ // If the scheme doesn't match any of the built-in schemes, see if it
+ // was registered by another package.
+ if proxy_proxySchemes != nil {
+ if f, ok := proxy_proxySchemes[u.Scheme]; ok {
+ return f(u, forward)
+ }
+ }
+
+ return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
+}
+
+var (
+ proxy_allProxyEnv = &proxy_envOnce{
+ names: []string{"ALL_PROXY", "all_proxy"},
+ }
+ proxy_noProxyEnv = &proxy_envOnce{
+ names: []string{"NO_PROXY", "no_proxy"},
+ }
+)
+
+// envOnce looks up an environment variable (optionally by multiple
+// names) once. It mitigates expensive lookups on some platforms
+// (e.g. Windows).
+// (Borrowed from net/http/transport.go)
+type proxy_envOnce struct {
+ names []string
+ once sync.Once
+ val string
+}
+
+func (e *proxy_envOnce) Get() string {
+ e.once.Do(e.init)
+ return e.val
+}
+
+func (e *proxy_envOnce) init() {
+ for _, n := range e.names {
+ e.val = os.Getenv(n)
+ if e.val != "" {
+ return
+ }
+ }
+}
+
+// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
+// with an optional username and password. See RFC 1928 and RFC 1929.
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
+ s := &proxy_socks5{
+ network: network,
+ addr: addr,
+ forward: forward,
+ }
+ if auth != nil {
+ s.user = auth.User
+ s.password = auth.Password
+ }
+
+ return s, nil
+}
+
+type proxy_socks5 struct {
+ user, password string
+ network, addr string
+ forward proxy_Dialer
+}
+
+const proxy_socks5Version = 5
+
+const (
+ proxy_socks5AuthNone = 0
+ proxy_socks5AuthPassword = 2
+)
+
+const proxy_socks5Connect = 1
+
+const (
+ proxy_socks5IP4 = 1
+ proxy_socks5Domain = 3
+ proxy_socks5IP6 = 4
+)
+
+var proxy_socks5Errors = []string{
+ "",
+ "general failure",
+ "connection forbidden",
+ "network unreachable",
+ "host unreachable",
+ "connection refused",
+ "TTL expired",
+ "command not supported",
+ "address type not supported",
+}
+
+// Dial connects to the address addr on the given network via the SOCKS5 proxy.
+func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
+ switch network {
+ case "tcp", "tcp6", "tcp4":
+ default:
+ return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
+ }
+
+ conn, err := s.forward.Dial(s.network, s.addr)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.connect(conn, addr); err != nil {
+ conn.Close()
+ return nil, err
+ }
+ return conn, nil
+}
+
+// connect takes an existing connection to a socks5 proxy server,
+// and commands the server to extend that connection to target,
+// which must be a canonical address with a host and port.
+func (s *proxy_socks5) connect(conn net.Conn, target string) error {
+ host, portStr, err := net.SplitHostPort(target)
+ if err != nil {
+ return err
+ }
+
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return errors.New("proxy: failed to parse port number: " + portStr)
+ }
+ if port < 1 || port > 0xffff {
+ return errors.New("proxy: port number out of range: " + portStr)
+ }
+
+ // the size here is just an estimate
+ buf := make([]byte, 0, 6+len(host))
+
+ buf = append(buf, proxy_socks5Version)
+ if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
+ buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
+ } else {
+ buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
+ }
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ if buf[0] != 5 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
+ }
+ if buf[1] == 0xff {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
+ }
+
+ // See RFC 1929
+ if buf[1] == proxy_socks5AuthPassword {
+ buf = buf[:0]
+ buf = append(buf, 1 /* password protocol version */)
+ buf = append(buf, uint8(len(s.user)))
+ buf = append(buf, s.user...)
+ buf = append(buf, uint8(len(s.password)))
+ buf = append(buf, s.password...)
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if buf[1] != 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
+ }
+ }
+
+ buf = buf[:0]
+ buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
+
+ if ip := net.ParseIP(host); ip != nil {
+ if ip4 := ip.To4(); ip4 != nil {
+ buf = append(buf, proxy_socks5IP4)
+ ip = ip4
+ } else {
+ buf = append(buf, proxy_socks5IP6)
+ }
+ buf = append(buf, ip...)
+ } else {
+ if len(host) > 255 {
+ return errors.New("proxy: destination host name too long: " + host)
+ }
+ buf = append(buf, proxy_socks5Domain)
+ buf = append(buf, byte(len(host)))
+ buf = append(buf, host...)
+ }
+ buf = append(buf, byte(port>>8), byte(port))
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:4]); err != nil {
+ return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ failure := "unknown error"
+ if int(buf[1]) < len(proxy_socks5Errors) {
+ failure = proxy_socks5Errors[buf[1]]
+ }
+
+ if len(failure) > 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
+ }
+
+ bytesToDiscard := 0
+ switch buf[3] {
+ case proxy_socks5IP4:
+ bytesToDiscard = net.IPv4len
+ case proxy_socks5IP6:
+ bytesToDiscard = net.IPv6len
+ case proxy_socks5Domain:
+ _, err := io.ReadFull(conn, buf[:1])
+ if err != nil {
+ return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ bytesToDiscard = int(buf[0])
+ default:
+ return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
+ }
+
+ if cap(buf) < bytesToDiscard {
+ buf = make([]byte, bytesToDiscard)
+ } else {
+ buf = buf[:bytesToDiscard]
+ }
+ if _, err := io.ReadFull(conn, buf); err != nil {
+ return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ // Also need to discard the port number
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ return nil
+}
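For orientation, the bundled `proxy_` code above mirrors golang.org/x/net/proxy: FromEnvironment builds a dialer from ALL_PROXY/NO_PROXY (falling back to a direct dialer), and the SOCKS5 dialer runs the RFC 1928/1929 handshake over a forwarded connection. A minimal sketch of how the equivalent public package is typically driven; the target host and proxy URL here are illustrative, not part of this patch:

package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	// Honors ALL_PROXY / NO_PROXY; returns a direct dialer when unset or unparsable.
	dialer := proxy.FromEnvironment()

	// With ALL_PROXY=socks5://user:pass@127.0.0.1:1080 this performs the
	// SOCKS5 greeting, optional username/password auth, and CONNECT.
	conn, err := dialer.Dial("tcp", "example.com:443")
	if err != nil {
		log.Fatalf("dial through proxy: %v", err)
	}
	defer conn.Close()
}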
diff --git a/vendor/github.com/jinzhu/gorm/.gitignore b/vendor/github.com/jinzhu/gorm/.gitignore
new file mode 100644
index 0000000..117f92f
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/.gitignore
@@ -0,0 +1,3 @@
+documents
+coverage.txt
+_book
diff --git a/vendor/github.com/jinzhu/gorm/License b/vendor/github.com/jinzhu/gorm/License
new file mode 100644
index 0000000..037e165
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/License
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-NOW Jinzhu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/jinzhu/gorm/README.md b/vendor/github.com/jinzhu/gorm/README.md
new file mode 100644
index 0000000..85588a7
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/README.md
@@ -0,0 +1,5 @@
+# GORM
+
+GORM V2 moved to https://github.com/go-gorm/gorm
+
+GORM V1 Doc https://v1.gorm.io/
diff --git a/vendor/github.com/jinzhu/gorm/association.go b/vendor/github.com/jinzhu/gorm/association.go
new file mode 100644
index 0000000..a73344f
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/association.go
@@ -0,0 +1,377 @@
+package gorm
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// Association Mode contains some helper methods to handle relationships easily.
+type Association struct {
+ Error error
+ scope *Scope
+ column string
+ field *Field
+}
+
+// Find finds out all related associations
+func (association *Association) Find(value interface{}) *Association {
+ association.scope.related(value, association.column)
+ return association.setErr(association.scope.db.Error)
+}
+
+// Append appends new associations for many2many, has_many; replaces the current association for has_one, belongs_to
+func (association *Association) Append(values ...interface{}) *Association {
+ if association.Error != nil {
+ return association
+ }
+
+ if relationship := association.field.Relationship; relationship.Kind == "has_one" {
+ return association.Replace(values...)
+ }
+ return association.saveAssociations(values...)
+}
+
+// Replace replaces current associations with new ones
+func (association *Association) Replace(values ...interface{}) *Association {
+ if association.Error != nil {
+ return association
+ }
+
+ var (
+ relationship = association.field.Relationship
+ scope = association.scope
+ field = association.field.Field
+ newDB = scope.NewDB()
+ )
+
+ // Append new values
+ association.field.Set(reflect.Zero(association.field.Field.Type()))
+ association.saveAssociations(values...)
+
+ // Belongs To
+ if relationship.Kind == "belongs_to" {
+ // Set foreign key to be null when clearing value (length equals 0)
+ if len(values) == 0 {
+ // Set foreign key to be nil
+ var foreignKeyMap = map[string]interface{}{}
+ for _, foreignKey := range relationship.ForeignDBNames {
+ foreignKeyMap[foreignKey] = nil
+ }
+ association.setErr(newDB.Model(scope.Value).UpdateColumn(foreignKeyMap).Error)
+ }
+ } else {
+ // Polymorphic Relations
+ if relationship.PolymorphicDBName != "" {
+ newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.PolymorphicDBName)), relationship.PolymorphicValue)
+ }
+
+ // Delete relations except newly created ones
+ if len(values) > 0 {
+ var associationForeignFieldNames, associationForeignDBNames []string
+ if relationship.Kind == "many_to_many" {
+ // for many to many relations, get association field names from the association foreign keys
+ associationScope := scope.New(reflect.New(field.Type()).Interface())
+ for idx, dbName := range relationship.AssociationForeignFieldNames {
+ if field, ok := associationScope.FieldByName(dbName); ok {
+ associationForeignFieldNames = append(associationForeignFieldNames, field.Name)
+ associationForeignDBNames = append(associationForeignDBNames, relationship.AssociationForeignDBNames[idx])
+ }
+ }
+ } else {
+ // for has one/many relations, use primary keys
+ for _, field := range scope.New(reflect.New(field.Type()).Interface()).PrimaryFields() {
+ associationForeignFieldNames = append(associationForeignFieldNames, field.Name)
+ associationForeignDBNames = append(associationForeignDBNames, field.DBName)
+ }
+ }
+
+ newPrimaryKeys := scope.getColumnAsArray(associationForeignFieldNames, field.Interface())
+
+ if len(newPrimaryKeys) > 0 {
+ sql := fmt.Sprintf("%v NOT IN (%v)", toQueryCondition(scope, associationForeignDBNames), toQueryMarks(newPrimaryKeys))
+ newDB = newDB.Where(sql, toQueryValues(newPrimaryKeys)...)
+ }
+ }
+
+ if relationship.Kind == "many_to_many" {
+ // if many to many relations, delete related relations from join table
+ var sourceForeignFieldNames []string
+
+ for _, dbName := range relationship.ForeignFieldNames {
+ if field, ok := scope.FieldByName(dbName); ok {
+ sourceForeignFieldNames = append(sourceForeignFieldNames, field.Name)
+ }
+ }
+
+ if sourcePrimaryKeys := scope.getColumnAsArray(sourceForeignFieldNames, scope.Value); len(sourcePrimaryKeys) > 0 {
+ newDB = newDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(sourcePrimaryKeys)), toQueryValues(sourcePrimaryKeys)...)
+
+ association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, newDB))
+ }
+ } else if relationship.Kind == "has_one" || relationship.Kind == "has_many" {
+ // has_one or has_many relations, set foreign key to be nil (TODO or delete them?)
+ var foreignKeyMap = map[string]interface{}{}
+ for idx, foreignKey := range relationship.ForeignDBNames {
+ foreignKeyMap[foreignKey] = nil
+ if field, ok := scope.FieldByName(relationship.AssociationForeignFieldNames[idx]); ok {
+ newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface())
+ }
+ }
+
+ fieldValue := reflect.New(association.field.Field.Type()).Interface()
+ association.setErr(newDB.Model(fieldValue).UpdateColumn(foreignKeyMap).Error)
+ }
+ }
+ return association
+}
+
+// Delete removes the relationship between the source & the passed arguments, but won't delete those arguments
+func (association *Association) Delete(values ...interface{}) *Association {
+ if association.Error != nil {
+ return association
+ }
+
+ var (
+ relationship = association.field.Relationship
+ scope = association.scope
+ field = association.field.Field
+ newDB = scope.NewDB()
+ )
+
+ if len(values) == 0 {
+ return association
+ }
+
+ var deletingResourcePrimaryFieldNames, deletingResourcePrimaryDBNames []string
+ for _, field := range scope.New(reflect.New(field.Type()).Interface()).PrimaryFields() {
+ deletingResourcePrimaryFieldNames = append(deletingResourcePrimaryFieldNames, field.Name)
+ deletingResourcePrimaryDBNames = append(deletingResourcePrimaryDBNames, field.DBName)
+ }
+
+ deletingPrimaryKeys := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, values...)
+
+ if relationship.Kind == "many_to_many" {
+ // source value's foreign keys
+ for idx, foreignKey := range relationship.ForeignDBNames {
+ if field, ok := scope.FieldByName(relationship.ForeignFieldNames[idx]); ok {
+ newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface())
+ }
+ }
+
+ // get the association's foreign field names
+ var associationScope = scope.New(reflect.New(field.Type()).Interface())
+ var associationForeignFieldNames []string
+ for _, associationDBName := range relationship.AssociationForeignFieldNames {
+ if field, ok := associationScope.FieldByName(associationDBName); ok {
+ associationForeignFieldNames = append(associationForeignFieldNames, field.Name)
+ }
+ }
+
+ // association value's foreign keys
+ deletingPrimaryKeys := scope.getColumnAsArray(associationForeignFieldNames, values...)
+ sql := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(deletingPrimaryKeys))
+ newDB = newDB.Where(sql, toQueryValues(deletingPrimaryKeys)...)
+
+ association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, newDB))
+ } else {
+ var foreignKeyMap = map[string]interface{}{}
+ for _, foreignKey := range relationship.ForeignDBNames {
+ foreignKeyMap[foreignKey] = nil
+ }
+
+ if relationship.Kind == "belongs_to" {
+ // find with deleting relation's foreign keys
+ primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, values...)
+ newDB = newDB.Where(
+ fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)),
+ toQueryValues(primaryKeys)...,
+ )
+
+ // set foreign key to be null if there are some records affected
+ modelValue := reflect.New(scope.GetModelStruct().ModelType).Interface()
+ if results := newDB.Model(modelValue).UpdateColumn(foreignKeyMap); results.Error == nil {
+ if results.RowsAffected > 0 {
+ scope.updatedAttrsWithValues(foreignKeyMap)
+ }
+ } else {
+ association.setErr(results.Error)
+ }
+ } else if relationship.Kind == "has_one" || relationship.Kind == "has_many" {
+ // find all relations
+ primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, scope.Value)
+ newDB = newDB.Where(
+ fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)),
+ toQueryValues(primaryKeys)...,
+ )
+
+ // only include those deleting relations
+ newDB = newDB.Where(
+ fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, deletingResourcePrimaryDBNames), toQueryMarks(deletingPrimaryKeys)),
+ toQueryValues(deletingPrimaryKeys)...,
+ )
+
+ // set matched relation's foreign key to be null
+ fieldValue := reflect.New(association.field.Field.Type()).Interface()
+ association.setErr(newDB.Model(fieldValue).UpdateColumn(foreignKeyMap).Error)
+ }
+ }
+
+ // Remove deleted records from source's field
+ if association.Error == nil {
+ if field.Kind() == reflect.Slice {
+ leftValues := reflect.Zero(field.Type())
+
+ for i := 0; i < field.Len(); i++ {
+ reflectValue := field.Index(i)
+ primaryKey := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, reflectValue.Interface())[0]
+ var isDeleted = false
+ for _, pk := range deletingPrimaryKeys {
+ if equalAsString(primaryKey, pk) {
+ isDeleted = true
+ break
+ }
+ }
+ if !isDeleted {
+ leftValues = reflect.Append(leftValues, reflectValue)
+ }
+ }
+
+ association.field.Set(leftValues)
+ } else if field.Kind() == reflect.Struct {
+ primaryKey := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, field.Interface())[0]
+ for _, pk := range deletingPrimaryKeys {
+ if equalAsString(primaryKey, pk) {
+ association.field.Set(reflect.Zero(field.Type()))
+ break
+ }
+ }
+ }
+ }
+
+ return association
+}
+
+// Clear removes the relationship between the source & current associations, but won't delete those associations
+func (association *Association) Clear() *Association {
+ return association.Replace()
+}
+
+// Count returns the count of current associations
+func (association *Association) Count() int {
+ var (
+ count = 0
+ relationship = association.field.Relationship
+ scope = association.scope
+ fieldValue = association.field.Field.Interface()
+ query = scope.DB()
+ )
+
+ switch relationship.Kind {
+ case "many_to_many":
+ query = relationship.JoinTableHandler.JoinWith(relationship.JoinTableHandler, query, scope.Value)
+ case "has_many", "has_one":
+ primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, scope.Value)
+ query = query.Where(
+ fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)),
+ toQueryValues(primaryKeys)...,
+ )
+ case "belongs_to":
+ primaryKeys := scope.getColumnAsArray(relationship.ForeignFieldNames, scope.Value)
+ query = query.Where(
+ fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(primaryKeys)),
+ toQueryValues(primaryKeys)...,
+ )
+ }
+
+ if relationship.PolymorphicType != "" {
+ query = query.Where(
+ fmt.Sprintf("%v.%v = ?", scope.New(fieldValue).QuotedTableName(), scope.Quote(relationship.PolymorphicDBName)),
+ relationship.PolymorphicValue,
+ )
+ }
+
+ if err := query.Model(fieldValue).Count(&count).Error; err != nil {
+ association.Error = err
+ }
+ return count
+}
+
+// saveAssociations saves the passed values as associations
+func (association *Association) saveAssociations(values ...interface{}) *Association {
+ var (
+ scope = association.scope
+ field = association.field
+ relationship = field.Relationship
+ )
+
+ saveAssociation := func(reflectValue reflect.Value) {
+ // value has to be a pointer
+ if reflectValue.Kind() != reflect.Ptr {
+ reflectPtr := reflect.New(reflectValue.Type())
+ reflectPtr.Elem().Set(reflectValue)
+ reflectValue = reflectPtr
+ }
+
+ // value has to be saved for many2many
+ if relationship.Kind == "many_to_many" {
+ if scope.New(reflectValue.Interface()).PrimaryKeyZero() {
+ association.setErr(scope.NewDB().Save(reflectValue.Interface()).Error)
+ }
+ }
+
+ // Assign Fields
+ var fieldType = field.Field.Type()
+ var setFieldBackToValue, setSliceFieldBackToValue bool
+ if reflectValue.Type().AssignableTo(fieldType) {
+ field.Set(reflectValue)
+ } else if reflectValue.Type().Elem().AssignableTo(fieldType) {
+ // if the field's type is a struct, we need to set the value back to the argument after save
+ setFieldBackToValue = true
+ field.Set(reflectValue.Elem())
+ } else if fieldType.Kind() == reflect.Slice {
+ if reflectValue.Type().AssignableTo(fieldType.Elem()) {
+ field.Set(reflect.Append(field.Field, reflectValue))
+ } else if reflectValue.Type().Elem().AssignableTo(fieldType.Elem()) {
+ // if the field's type is a slice of structs, we need to set the value back to the argument after save
+ setSliceFieldBackToValue = true
+ field.Set(reflect.Append(field.Field, reflectValue.Elem()))
+ }
+ }
+
+ if relationship.Kind == "many_to_many" {
+ association.setErr(relationship.JoinTableHandler.Add(relationship.JoinTableHandler, scope.NewDB(), scope.Value, reflectValue.Interface()))
+ } else {
+ association.setErr(scope.NewDB().Select(field.Name).Save(scope.Value).Error)
+
+ if setFieldBackToValue {
+ reflectValue.Elem().Set(field.Field)
+ } else if setSliceFieldBackToValue {
+ reflectValue.Elem().Set(field.Field.Index(field.Field.Len() - 1))
+ }
+ }
+ }
+
+ for _, value := range values {
+ reflectValue := reflect.ValueOf(value)
+ indirectReflectValue := reflect.Indirect(reflectValue)
+ if indirectReflectValue.Kind() == reflect.Struct {
+ saveAssociation(reflectValue)
+ } else if indirectReflectValue.Kind() == reflect.Slice {
+ for i := 0; i < indirectReflectValue.Len(); i++ {
+ saveAssociation(indirectReflectValue.Index(i))
+ }
+ } else {
+ association.setErr(errors.New("invalid value type"))
+ }
+ }
+ return association
+}
+
+// setErr sets the error when it is not nil and returns the Association.
+func (association *Association) setErr(err error) *Association {
+ if err != nil {
+ association.Error = err
+ }
+ return association
+}
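In application code these methods are reached through GORM v1's association mode, i.e. DB.Association(column) on a loaded model. A rough usage sketch under the assumption of a User model with a has_many Languages relation; the model types and field names are illustrative, not part of the vendored code:

package example

import "github.com/jinzhu/gorm"

// Illustrative models for a has_many relation.
type Language struct {
	gorm.Model
	UserID uint
	Name   string
}

type User struct {
	gorm.Model
	Name      string
	Languages []Language
}

// manageLanguages assumes user is an already-saved record (primary key set).
func manageLanguages(db *gorm.DB, user *User) error {
	assoc := db.Model(user).Association("Languages")

	// Append adds new related rows; Replace swaps the whole set.
	assoc.Append(Language{Name: "DE"})
	assoc.Replace(Language{Name: "FR"}, Language{Name: "EN"})

	// Find and Count read back the current associations.
	var langs []Language
	assoc.Find(&langs)
	total := assoc.Count()
	_ = total

	// Clear removes the link (sets the foreign keys to NULL) without deleting rows.
	assoc.Clear()

	return assoc.Error
}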
diff --git a/vendor/github.com/jinzhu/gorm/callback.go b/vendor/github.com/jinzhu/gorm/callback.go
new file mode 100644
index 0000000..1f0e3c7
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/callback.go
@@ -0,0 +1,250 @@
+package gorm
+
+import "fmt"
+
+// DefaultCallback default callbacks defined by gorm
+var DefaultCallback = &Callback{logger: nopLogger{}}
+
+// Callback is a struct that contains all CRUD callbacks
+// Field `creates` contains callbacks that will be called when creating an object
+// Field `updates` contains callbacks that will be called when updating an object
+// Field `deletes` contains callbacks that will be called when deleting an object
+// Field `queries` contains callbacks that will be called when querying objects with query methods like Find, First, Related, Association...
+// Field `rowQueries` contains callbacks that will be called when querying objects with Row, Rows...
+// Field `processors` contains all callback processors, which will be used to generate the above callbacks in order
+type Callback struct {
+ logger logger
+ creates []*func(scope *Scope)
+ updates []*func(scope *Scope)
+ deletes []*func(scope *Scope)
+ queries []*func(scope *Scope)
+ rowQueries []*func(scope *Scope)
+ processors []*CallbackProcessor
+}
+
+// CallbackProcessor contains callback information
+type CallbackProcessor struct {
+ logger logger
+ name string // current callback's name
+ before string // register current callback before a callback
+ after string // register current callback after a callback
+ replace bool // replace callbacks with same name
+ remove bool // delete callbacks with same name
+ kind string // callback type: create, update, delete, query, row_query
+ processor *func(scope *Scope) // callback handler
+ parent *Callback
+}
+
+func (c *Callback) clone(logger logger) *Callback {
+ return &Callback{
+ logger: logger,
+ creates: c.creates,
+ updates: c.updates,
+ deletes: c.deletes,
+ queries: c.queries,
+ rowQueries: c.rowQueries,
+ processors: c.processors,
+ }
+}
+
+// Create could be used to register callbacks for creating an object
+// db.Callback().Create().After("gorm:create").Register("plugin:run_after_create", func(*Scope) {
+// // business logic
+// ...
+//
+// // set an error if something went wrong, which will roll back the create
+// scope.Err(errors.New("error"))
+// })
+func (c *Callback) Create() *CallbackProcessor {
+ return &CallbackProcessor{logger: c.logger, kind: "create", parent: c}
+}
+
+// Update could be used to register callbacks for updating an object; refer to `Create` for usage
+func (c *Callback) Update() *CallbackProcessor {
+ return &CallbackProcessor{logger: c.logger, kind: "update", parent: c}
+}
+
+// Delete could be used to register callbacks for deleting an object; refer to `Create` for usage
+func (c *Callback) Delete() *CallbackProcessor {
+ return &CallbackProcessor{logger: c.logger, kind: "delete", parent: c}
+}
+
+// Query could be used to register callbacks for querying objects with query methods like `Find`, `First`, `Related`, `Association`...
+// Refer to `Create` for usage
+func (c *Callback) Query() *CallbackProcessor {
+ return &CallbackProcessor{logger: c.logger, kind: "query", parent: c}
+}
+
+// RowQuery could be used to register callbacks for querying objects with `Row`, `Rows`; refer to `Create` for usage
+func (c *Callback) RowQuery() *CallbackProcessor {
+ return &CallbackProcessor{logger: c.logger, kind: "row_query", parent: c}
+}
+
+// After inserts a new callback after callback `callbackName`; refer to `Callbacks.Create`
+func (cp *CallbackProcessor) After(callbackName string) *CallbackProcessor {
+ cp.after = callbackName
+ return cp
+}
+
+// Before inserts a new callback before callback `callbackName`; refer to `Callbacks.Create`
+func (cp *CallbackProcessor) Before(callbackName string) *CallbackProcessor {
+ cp.before = callbackName
+ return cp
+}
+
+// Register a new callback, refer `Callbacks.Create`
+func (cp *CallbackProcessor) Register(callbackName string, callback func(scope *Scope)) {
+ if cp.kind == "row_query" {
+ if cp.before == "" && cp.after == "" && callbackName != "gorm:row_query" {
+ cp.logger.Print("info", fmt.Sprintf("Registering RowQuery callback %v without specify order with Before(), After(), applying Before('gorm:row_query') by default for compatibility...", callbackName))
+ cp.before = "gorm:row_query"
+ }
+ }
+
+ cp.logger.Print("info", fmt.Sprintf("[info] registering callback `%v` from %v", callbackName, fileWithLineNum()))
+ cp.name = callbackName
+ cp.processor = &callback
+ cp.parent.processors = append(cp.parent.processors, cp)
+ cp.parent.reorder()
+}
+
+// Remove a registered callback
+// db.Callback().Create().Remove("gorm:update_time_stamp_when_create")
+func (cp *CallbackProcessor) Remove(callbackName string) {
+ cp.logger.Print("info", fmt.Sprintf("[info] removing callback `%v` from %v", callbackName, fileWithLineNum()))
+ cp.name = callbackName
+ cp.remove = true
+ cp.parent.processors = append(cp.parent.processors, cp)
+ cp.parent.reorder()
+}
+
+// Replace a registered callback with a new callback
+// db.Callback().Create().Replace("gorm:update_time_stamp_when_create", func(*Scope) {
+// scope.SetColumn("CreatedAt", now)
+// scope.SetColumn("UpdatedAt", now)
+// })
+func (cp *CallbackProcessor) Replace(callbackName string, callback func(scope *Scope)) {
+ cp.logger.Print("info", fmt.Sprintf("[info] replacing callback `%v` from %v", callbackName, fileWithLineNum()))
+ cp.name = callbackName
+ cp.processor = &callback
+ cp.replace = true
+ cp.parent.processors = append(cp.parent.processors, cp)
+ cp.parent.reorder()
+}
+
+// Get a registered callback
+// db.Callback().Create().Get("gorm:create")
+func (cp *CallbackProcessor) Get(callbackName string) (callback func(scope *Scope)) {
+ for _, p := range cp.parent.processors {
+ if p.name == callbackName && p.kind == cp.kind {
+ if p.remove {
+ callback = nil
+ } else {
+ callback = *p.processor
+ }
+ }
+ }
+ return
+}
+
+// getRIndex gets the rightmost index of str in the string slice
+func getRIndex(strs []string, str string) int {
+ for i := len(strs) - 1; i >= 0; i-- {
+ if strs[i] == str {
+ return i
+ }
+ }
+ return -1
+}
+
+// sortProcessors sorts callback processors based on their before, after, remove and replace settings
+func sortProcessors(cps []*CallbackProcessor) []*func(scope *Scope) {
+ var (
+ allNames, sortedNames []string
+ sortCallbackProcessor func(c *CallbackProcessor)
+ )
+
+ for _, cp := range cps {
+ // show a warning message if the callback name already exists
+ if index := getRIndex(allNames, cp.name); index > -1 && !cp.replace && !cp.remove {
+ cp.logger.Print("warning", fmt.Sprintf("[warning] duplicated callback `%v` from %v", cp.name, fileWithLineNum()))
+ }
+ allNames = append(allNames, cp.name)
+ }
+
+ sortCallbackProcessor = func(c *CallbackProcessor) {
+ if getRIndex(sortedNames, c.name) == -1 { // if not sorted
+ if c.before != "" { // if defined before callback
+ if index := getRIndex(sortedNames, c.before); index != -1 {
+ // if the before callback is already sorted, insert the current callback just before it
+ sortedNames = append(sortedNames[:index], append([]string{c.name}, sortedNames[index:]...)...)
+ } else if index := getRIndex(allNames, c.before); index != -1 {
+ // if the before callback exists but hasn't been sorted, append the current callback to the end
+ sortedNames = append(sortedNames, c.name)
+ sortCallbackProcessor(cps[index])
+ }
+ }
+
+ if c.after != "" { // if defined after callback
+ if index := getRIndex(sortedNames, c.after); index != -1 {
+ // if the after callback is already sorted, insert the current callback just after it
+ sortedNames = append(sortedNames[:index+1], append([]string{c.name}, sortedNames[index+1:]...)...)
+ } else if index := getRIndex(allNames, c.after); index != -1 {
+ // if the after callback exists but hasn't been sorted
+ cp := cps[index]
+ // set after callback's before callback to current callback
+ if cp.before == "" {
+ cp.before = c.name
+ }
+ sortCallbackProcessor(cp)
+ }
+ }
+
+ // if the current callback hasn't been sorted, append it to the end
+ if getRIndex(sortedNames, c.name) == -1 {
+ sortedNames = append(sortedNames, c.name)
+ }
+ }
+ }
+
+ for _, cp := range cps {
+ sortCallbackProcessor(cp)
+ }
+
+ var sortedFuncs []*func(scope *Scope)
+ for _, name := range sortedNames {
+ if index := getRIndex(allNames, name); !cps[index].remove {
+ sortedFuncs = append(sortedFuncs, cps[index].processor)
+ }
+ }
+
+ return sortedFuncs
+}
+
+// reorder all registered processors, and reset CRUD callbacks
+func (c *Callback) reorder() {
+ var creates, updates, deletes, queries, rowQueries []*CallbackProcessor
+
+ for _, processor := range c.processors {
+ if processor.name != "" {
+ switch processor.kind {
+ case "create":
+ creates = append(creates, processor)
+ case "update":
+ updates = append(updates, processor)
+ case "delete":
+ deletes = append(deletes, processor)
+ case "query":
+ queries = append(queries, processor)
+ case "row_query":
+ rowQueries = append(rowQueries, processor)
+ }
+ }
+ }
+
+ c.creates = sortProcessors(creates)
+ c.updates = sortProcessors(updates)
+ c.deletes = sortProcessors(deletes)
+ c.queries = sortProcessors(queries)
+ c.rowQueries = sortProcessors(rowQueries)
+}
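The doc comments above sketch the registration API; in application code a custom processor is typically wired up once against a *gorm.DB. The plugin name and hook body below are illustrative:

package example

import (
	"log"

	"github.com/jinzhu/gorm"
)

// registerAuditHook adds a callback that runs right after gorm's built-in create.
func registerAuditHook(db *gorm.DB) {
	db.Callback().Create().After("gorm:create").Register("plugin:audit_create", func(scope *gorm.Scope) {
		// Calling scope.Err(...) here would roll back the surrounding create.
		log.Printf("created a row in %v", scope.TableName())
	})

	// Callbacks can later be replaced or removed by name, e.g.:
	// db.Callback().Create().Remove("plugin:audit_create")
}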
diff --git a/vendor/github.com/jinzhu/gorm/callback_create.go b/vendor/github.com/jinzhu/gorm/callback_create.go
new file mode 100644
index 0000000..c4d25f3
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/callback_create.go
@@ -0,0 +1,197 @@
+package gorm
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Define callbacks for creating
+func init() {
+ DefaultCallback.Create().Register("gorm:begin_transaction", beginTransactionCallback)
+ DefaultCallback.Create().Register("gorm:before_create", beforeCreateCallback)
+ DefaultCallback.Create().Register("gorm:save_before_associations", saveBeforeAssociationsCallback)
+ DefaultCallback.Create().Register("gorm:update_time_stamp", updateTimeStampForCreateCallback)
+ DefaultCallback.Create().Register("gorm:create", createCallback)
+ DefaultCallback.Create().Register("gorm:force_reload_after_create", forceReloadAfterCreateCallback)
+ DefaultCallback.Create().Register("gorm:save_after_associations", saveAfterAssociationsCallback)
+ DefaultCallback.Create().Register("gorm:after_create", afterCreateCallback)
+ DefaultCallback.Create().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback)
+}
+
+// beforeCreateCallback will invoke `BeforeSave`, `BeforeCreate` method before creating
+func beforeCreateCallback(scope *Scope) {
+ if !scope.HasError() {
+ scope.CallMethod("BeforeSave")
+ }
+ if !scope.HasError() {
+ scope.CallMethod("BeforeCreate")
+ }
+}
+
+// updateTimeStampForCreateCallback will set `CreatedAt`, `UpdatedAt` when creating
+func updateTimeStampForCreateCallback(scope *Scope) {
+ if !scope.HasError() {
+ now := scope.db.nowFunc()
+
+ if createdAtField, ok := scope.FieldByName("CreatedAt"); ok {
+ if createdAtField.IsBlank {
+ createdAtField.Set(now)
+ }
+ }
+
+ if updatedAtField, ok := scope.FieldByName("UpdatedAt"); ok {
+ if updatedAtField.IsBlank {
+ updatedAtField.Set(now)
+ }
+ }
+ }
+}
+
+// createCallback is the callback used to insert data into the database
+func createCallback(scope *Scope) {
+ if !scope.HasError() {
+ defer scope.trace(NowFunc())
+
+ var (
+ columns, placeholders []string
+ blankColumnsWithDefaultValue []string
+ )
+
+ for _, field := range scope.Fields() {
+ if scope.changeableField(field) {
+ if field.IsNormal && !field.IsIgnored {
+ if field.IsBlank && field.HasDefaultValue {
+ blankColumnsWithDefaultValue = append(blankColumnsWithDefaultValue, scope.Quote(field.DBName))
+ scope.InstanceSet("gorm:blank_columns_with_default_value", blankColumnsWithDefaultValue)
+ } else if !field.IsPrimaryKey || !field.IsBlank {
+ columns = append(columns, scope.Quote(field.DBName))
+ placeholders = append(placeholders, scope.AddToVars(field.Field.Interface()))
+ }
+ } else if field.Relationship != nil && field.Relationship.Kind == "belongs_to" {
+ for _, foreignKey := range field.Relationship.ForeignDBNames {
+ if foreignField, ok := scope.FieldByName(foreignKey); ok && !scope.changeableField(foreignField) {
+ columns = append(columns, scope.Quote(foreignField.DBName))
+ placeholders = append(placeholders, scope.AddToVars(foreignField.Field.Interface()))
+ }
+ }
+ }
+ }
+ }
+
+ var (
+ returningColumn = "*"
+ quotedTableName = scope.QuotedTableName()
+ primaryField = scope.PrimaryField()
+ extraOption string
+ insertModifier string
+ )
+
+ if str, ok := scope.Get("gorm:insert_option"); ok {
+ extraOption = fmt.Sprint(str)
+ }
+ if str, ok := scope.Get("gorm:insert_modifier"); ok {
+ insertModifier = strings.ToUpper(fmt.Sprint(str))
+ if insertModifier == "INTO" {
+ insertModifier = ""
+ }
+ }
+
+ if primaryField != nil {
+ returningColumn = scope.Quote(primaryField.DBName)
+ }
+
+ lastInsertIDOutputInterstitial := scope.Dialect().LastInsertIDOutputInterstitial(quotedTableName, returningColumn, columns)
+ var lastInsertIDReturningSuffix string
+ if lastInsertIDOutputInterstitial == "" {
+ lastInsertIDReturningSuffix = scope.Dialect().LastInsertIDReturningSuffix(quotedTableName, returningColumn)
+ }
+
+ if len(columns) == 0 {
+ scope.Raw(fmt.Sprintf(
+ "INSERT%v INTO %v %v%v%v",
+ addExtraSpaceIfExist(insertModifier),
+ quotedTableName,
+ scope.Dialect().DefaultValueStr(),
+ addExtraSpaceIfExist(extraOption),
+ addExtraSpaceIfExist(lastInsertIDReturningSuffix),
+ ))
+ } else {
+ scope.Raw(fmt.Sprintf(
+ "INSERT%v INTO %v (%v)%v VALUES (%v)%v%v",
+ addExtraSpaceIfExist(insertModifier),
+ scope.QuotedTableName(),
+ strings.Join(columns, ","),
+ addExtraSpaceIfExist(lastInsertIDOutputInterstitial),
+ strings.Join(placeholders, ","),
+ addExtraSpaceIfExist(extraOption),
+ addExtraSpaceIfExist(lastInsertIDReturningSuffix),
+ ))
+ }
+
+ // execute create sql: no primaryField
+ if primaryField == nil {
+ if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil {
+ // set rows affected count
+ scope.db.RowsAffected, _ = result.RowsAffected()
+
+ // set primary value to primary field
+ if primaryField != nil && primaryField.IsBlank {
+ if primaryValue, err := result.LastInsertId(); scope.Err(err) == nil {
+ scope.Err(primaryField.Set(primaryValue))
+ }
+ }
+ }
+ return
+ }
+
+ // execute create sql: lastInsertID implementation for the majority of dialects
+ if lastInsertIDReturningSuffix == "" && lastInsertIDOutputInterstitial == "" {
+ if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil {
+ // set rows affected count
+ scope.db.RowsAffected, _ = result.RowsAffected()
+
+ // set primary value to primary field
+ if primaryField != nil && primaryField.IsBlank {
+ if primaryValue, err := result.LastInsertId(); scope.Err(err) == nil {
+ scope.Err(primaryField.Set(primaryValue))
+ }
+ }
+ }
+ return
+ }
+
+ // execute create sql: dialects with additional lastInsertID requirements (currently postgres & mssql)
+ if primaryField.Field.CanAddr() {
+ if err := scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...).Scan(primaryField.Field.Addr().Interface()); scope.Err(err) == nil {
+ primaryField.IsBlank = false
+ scope.db.RowsAffected = 1
+ }
+ } else {
+ scope.Err(ErrUnaddressable)
+ }
+ return
+ }
+}
+
+// forceReloadAfterCreateCallback will reload columns that have default values, and set them back on the current object
+func forceReloadAfterCreateCallback(scope *Scope) {
+ if blankColumnsWithDefaultValue, ok := scope.InstanceGet("gorm:blank_columns_with_default_value"); ok {
+ db := scope.DB().New().Table(scope.TableName()).Select(blankColumnsWithDefaultValue.([]string))
+ for _, field := range scope.Fields() {
+ if field.IsPrimaryKey && !field.IsBlank {
+ db = db.Where(fmt.Sprintf("%v = ?", field.DBName), field.Field.Interface())
+ }
+ }
+ db.Scan(scope.Value)
+ }
+}
+
+// afterCreateCallback will invoke `AfterCreate`, `AfterSave` method after creating
+func afterCreateCallback(scope *Scope) {
+ if !scope.HasError() {
+ scope.CallMethod("AfterCreate")
+ }
+ if !scope.HasError() {
+ scope.CallMethod("AfterSave")
+ }
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback_delete.go b/vendor/github.com/jinzhu/gorm/callback_delete.go
new file mode 100644
index 0000000..48b97ac
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/callback_delete.go
@@ -0,0 +1,63 @@
+package gorm
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Define callbacks for deleting
+func init() {
+ DefaultCallback.Delete().Register("gorm:begin_transaction", beginTransactionCallback)
+ DefaultCallback.Delete().Register("gorm:before_delete", beforeDeleteCallback)
+ DefaultCallback.Delete().Register("gorm:delete", deleteCallback)
+ DefaultCallback.Delete().Register("gorm:after_delete", afterDeleteCallback)
+ DefaultCallback.Delete().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback)
+}
+
+// beforeDeleteCallback will invoke `BeforeDelete` method before deleting
+func beforeDeleteCallback(scope *Scope) {
+ if scope.DB().HasBlockGlobalUpdate() && !scope.hasConditions() {
+ scope.Err(errors.New("missing WHERE clause while deleting"))
+ return
+ }
+ if !scope.HasError() {
+ scope.CallMethod("BeforeDelete")
+ }
+}
+
+// deleteCallback is used to delete data from the database or set deleted_at to the current time (when using soft delete)
+func deleteCallback(scope *Scope) {
+ if !scope.HasError() {
+ var extraOption string
+ if str, ok := scope.Get("gorm:delete_option"); ok {
+ extraOption = fmt.Sprint(str)
+ }
+
+ deletedAtField, hasDeletedAtField := scope.FieldByName("DeletedAt")
+
+ if !scope.Search.Unscoped && hasDeletedAtField {
+ scope.Raw(fmt.Sprintf(
+ "UPDATE %v SET %v=%v%v%v",
+ scope.QuotedTableName(),
+ scope.Quote(deletedAtField.DBName),
+ scope.AddToVars(scope.db.nowFunc()),
+ addExtraSpaceIfExist(scope.CombinedConditionSql()),
+ addExtraSpaceIfExist(extraOption),
+ )).Exec()
+ } else {
+ scope.Raw(fmt.Sprintf(
+ "DELETE FROM %v%v%v",
+ scope.QuotedTableName(),
+ addExtraSpaceIfExist(scope.CombinedConditionSql()),
+ addExtraSpaceIfExist(extraOption),
+ )).Exec()
+ }
+ }
+}
+
+// afterDeleteCallback will invoke `AfterDelete` method after deleting
+func afterDeleteCallback(scope *Scope) {
+ if !scope.HasError() {
+ scope.CallMethod("AfterDelete")
+ }
+}
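The soft-delete branch in deleteCallback is what gives gorm.Model its usual behavior: if the model has a DeletedAt field, Delete issues an UPDATE setting deleted_at, and only Unscoped deletes the row. A small sketch; the Order model and SQL shapes are illustrative:

package example

import "github.com/jinzhu/gorm"

type Order struct {
	gorm.Model // includes DeletedAt, so deletes are soft by default
	Ref string
}

func deleteOrder(db *gorm.DB, order *Order) {
	// Runs roughly: UPDATE "orders" SET deleted_at = ? WHERE id = ?
	db.Delete(order)

	// Unscoped skips the DeletedAt branch and issues a real DELETE.
	db.Unscoped().Delete(order)
}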
diff --git a/vendor/github.com/jinzhu/gorm/callback_query.go b/vendor/github.com/jinzhu/gorm/callback_query.go
new file mode 100644
index 0000000..544afd6
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/callback_query.go
@@ -0,0 +1,109 @@
+package gorm
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// Define callbacks for querying
+func init() {
+ DefaultCallback.Query().Register("gorm:query", queryCallback)
+ DefaultCallback.Query().Register("gorm:preload", preloadCallback)
+ DefaultCallback.Query().Register("gorm:after_query", afterQueryCallback)
+}
+
+// queryCallback is used to query data from the database
+func queryCallback(scope *Scope) {
+ if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip {
+ return
+ }
+
+ // we are only preloading relations, don't touch the base model
+ if _, skip := scope.InstanceGet("gorm:only_preload"); skip {
+ return
+ }
+
+ defer scope.trace(NowFunc())
+
+ var (
+ isSlice, isPtr bool
+ resultType reflect.Type
+ results = scope.IndirectValue()
+ )
+
+ if orderBy, ok := scope.Get("gorm:order_by_primary_key"); ok {
+ if primaryField := scope.PrimaryField(); primaryField != nil {
+ scope.Search.Order(fmt.Sprintf("%v.%v %v", scope.QuotedTableName(), scope.Quote(primaryField.DBName), orderBy))
+ }
+ }
+
+ if value, ok := scope.Get("gorm:query_destination"); ok {
+ results = indirect(reflect.ValueOf(value))
+ }
+
+ if kind := results.Kind(); kind == reflect.Slice {
+ isSlice = true
+ resultType = results.Type().Elem()
+ results.Set(reflect.MakeSlice(results.Type(), 0, 0))
+
+ if resultType.Kind() == reflect.Ptr {
+ isPtr = true
+ resultType = resultType.Elem()
+ }
+ } else if kind != reflect.Struct {
+ scope.Err(errors.New("unsupported destination, should be slice or struct"))
+ return
+ }
+
+ scope.prepareQuerySQL()
+
+ if !scope.HasError() {
+ scope.db.RowsAffected = 0
+
+ if str, ok := scope.Get("gorm:query_hint"); ok {
+ scope.SQL = fmt.Sprint(str) + scope.SQL
+ }
+
+ if str, ok := scope.Get("gorm:query_option"); ok {
+ scope.SQL += addExtraSpaceIfExist(fmt.Sprint(str))
+ }
+
+ if rows, err := scope.SQLDB().Query(scope.SQL, scope.SQLVars...); scope.Err(err) == nil {
+ defer rows.Close()
+
+ columns, _ := rows.Columns()
+ for rows.Next() {
+ scope.db.RowsAffected++
+
+ elem := results
+ if isSlice {
+ elem = reflect.New(resultType).Elem()
+ }
+
+ scope.scan(rows, columns, scope.New(elem.Addr().Interface()).Fields())
+
+ if isSlice {
+ if isPtr {
+ results.Set(reflect.Append(results, elem.Addr()))
+ } else {
+ results.Set(reflect.Append(results, elem))
+ }
+ }
+ }
+
+ if err := rows.Err(); err != nil {
+ scope.Err(err)
+ } else if scope.db.RowsAffected == 0 && !isSlice {
+ scope.Err(ErrRecordNotFound)
+ }
+ }
+ }
+}
+
+// afterQueryCallback will invoke `AfterFind` method after querying
+func afterQueryCallback(scope *Scope) {
+ if !scope.HasError() {
+ scope.CallMethod("AfterFind")
+ }
+}
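As the callback chain above implies, Find/First scan into either a struct or a slice, return ErrRecordNotFound only for the single-record case, and then invoke the model's AfterFind hook via gorm:after_query. A brief sketch; the Product model and hook are illustrative:

package example

import "github.com/jinzhu/gorm"

type Product struct {
	gorm.Model
	Code  string
	Price int
}

// AfterFind is called by the gorm:after_query callback for each loaded record.
func (p *Product) AfterFind() error {
	if p.Price < 0 {
		p.Price = 0
	}
	return nil
}

func loadProducts(db *gorm.DB) error {
	var one Product
	// Struct destination: no matching row yields gorm.ErrRecordNotFound.
	if err := db.First(&one, "code = ?", "L1212").Error; err != nil {
		return err
	}

	// Slice destination: an empty result is not an error.
	var all []Product
	return db.Find(&all).Error
}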
diff --git a/vendor/github.com/jinzhu/gorm/callback_query_preload.go b/vendor/github.com/jinzhu/gorm/callback_query_preload.go
new file mode 100644
index 0000000..a936180
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/callback_query_preload.go
@@ -0,0 +1,410 @@
+package gorm
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// preloadCallback is used to preload associations
+func preloadCallback(scope *Scope) {
+ if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip {
+ return
+ }
+
+ if ap, ok := scope.Get("gorm:auto_preload"); ok {
+ // If gorm:auto_preload IS NOT a bool then auto preload.
+ // Else if it IS a bool, use the value
+ if apb, ok := ap.(bool); !ok {
+ autoPreload(scope)
+ } else if apb {
+ autoPreload(scope)
+ }
+ }
+
+ if scope.Search.preload == nil || scope.HasError() {
+ return
+ }
+
+ var (
+ preloadedMap = map[string]bool{}
+ fields = scope.Fields()
+ )
+
+ for _, preload := range scope.Search.preload {
+ var (
+ preloadFields = strings.Split(preload.schema, ".")
+ currentScope = scope
+ currentFields = fields
+ )
+
+ for idx, preloadField := range preloadFields {
+ var currentPreloadConditions []interface{}
+
+ if currentScope == nil {
+ continue
+ }
+
+ // if not preloaded
+ if preloadKey := strings.Join(preloadFields[:idx+1], "."); !preloadedMap[preloadKey] {
+
+ // assign search conditions to last preload
+ if idx == len(preloadFields)-1 {
+ currentPreloadConditions = preload.conditions
+ }
+
+ for _, field := range currentFields {
+ if field.Name != preloadField || field.Relationship == nil {
+ continue
+ }
+
+ switch field.Relationship.Kind {
+ case "has_one":
+ currentScope.handleHasOnePreload(field, currentPreloadConditions)
+ case "has_many":
+ currentScope.handleHasManyPreload(field, currentPreloadConditions)
+ case "belongs_to":
+ currentScope.handleBelongsToPreload(field, currentPreloadConditions)
+ case "many_to_many":
+ currentScope.handleManyToManyPreload(field, currentPreloadConditions)
+ default:
+ scope.Err(errors.New("unsupported relation"))
+ }
+
+ preloadedMap[preloadKey] = true
+ break
+ }
+
+ if !preloadedMap[preloadKey] {
+ scope.Err(fmt.Errorf("can't preload field %s for %s", preloadField, currentScope.GetModelStruct().ModelType))
+ return
+ }
+ }
+
+ // preload next level
+ if idx < len(preloadFields)-1 {
+ currentScope = currentScope.getColumnAsScope(preloadField)
+ if currentScope != nil {
+ currentFields = currentScope.Fields()
+ }
+ }
+ }
+ }
+}
+
+func autoPreload(scope *Scope) {
+ for _, field := range scope.Fields() {
+ if field.Relationship == nil {
+ continue
+ }
+
+ if val, ok := field.TagSettingsGet("PRELOAD"); ok {
+ if preload, err := strconv.ParseBool(val); err != nil {
+ scope.Err(errors.New("invalid preload option"))
+ return
+ } else if !preload {
+ continue
+ }
+ }
+
+ scope.Search.Preload(field.Name)
+ }
+}
+
+func (scope *Scope) generatePreloadDBWithConditions(conditions []interface{}) (*DB, []interface{}) {
+ var (
+ preloadDB = scope.NewDB()
+ preloadConditions []interface{}
+ )
+
+ for _, condition := range conditions {
+ if scopes, ok := condition.(func(*DB) *DB); ok {
+ preloadDB = scopes(preloadDB)
+ } else {
+ preloadConditions = append(preloadConditions, condition)
+ }
+ }
+
+ return preloadDB, preloadConditions
+}
+
+// handleHasOnePreload is used to preload has one associations
+func (scope *Scope) handleHasOnePreload(field *Field, conditions []interface{}) {
+ relation := field.Relationship
+
+ // get the relation's primary keys
+ primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value)
+ if len(primaryKeys) == 0 {
+ return
+ }
+
+ // preload conditions
+ preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
+
+ // find relations
+ query := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys))
+ values := toQueryValues(primaryKeys)
+ if relation.PolymorphicType != "" {
+ query += fmt.Sprintf(" AND %v = ?", scope.Quote(relation.PolymorphicDBName))
+ values = append(values, relation.PolymorphicValue)
+ }
+
+ results := makeSlice(field.Struct.Type)
+ scope.Err(preloadDB.Where(query, values...).Find(results, preloadConditions...).Error)
+
+ // assign find results
+ var (
+ resultsValue = indirect(reflect.ValueOf(results))
+ indirectScopeValue = scope.IndirectValue()
+ )
+
+ if indirectScopeValue.Kind() == reflect.Slice {
+ foreignValuesToResults := make(map[string]reflect.Value)
+ for i := 0; i < resultsValue.Len(); i++ {
+ result := resultsValue.Index(i)
+ foreignValues := toString(getValueFromFields(result, relation.ForeignFieldNames))
+ foreignValuesToResults[foreignValues] = result
+ }
+ for j := 0; j < indirectScopeValue.Len(); j++ {
+ indirectValue := indirect(indirectScopeValue.Index(j))
+ valueString := toString(getValueFromFields(indirectValue, relation.AssociationForeignFieldNames))
+ if result, found := foreignValuesToResults[valueString]; found {
+ indirectValue.FieldByName(field.Name).Set(result)
+ }
+ }
+ } else {
+ for i := 0; i < resultsValue.Len(); i++ {
+ result := resultsValue.Index(i)
+ scope.Err(field.Set(result))
+ }
+ }
+}
+
+// handleHasManyPreload is used to preload has many associations
+func (scope *Scope) handleHasManyPreload(field *Field, conditions []interface{}) {
+ relation := field.Relationship
+
+ // get the relation's primary keys
+ primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value)
+ if len(primaryKeys) == 0 {
+ return
+ }
+
+ // preload conditions
+ preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
+
+ // find relations
+ query := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys))
+ values := toQueryValues(primaryKeys)
+ if relation.PolymorphicType != "" {
+ query += fmt.Sprintf(" AND %v = ?", scope.Quote(relation.PolymorphicDBName))
+ values = append(values, relation.PolymorphicValue)
+ }
+
+ results := makeSlice(field.Struct.Type)
+ scope.Err(preloadDB.Where(query, values...).Find(results, preloadConditions...).Error)
+
+ // assign find results
+ var (
+ resultsValue = indirect(reflect.ValueOf(results))
+ indirectScopeValue = scope.IndirectValue()
+ )
+
+ if indirectScopeValue.Kind() == reflect.Slice {
+ preloadMap := make(map[string][]reflect.Value)
+ for i := 0; i < resultsValue.Len(); i++ {
+ result := resultsValue.Index(i)
+ foreignValues := getValueFromFields(result, relation.ForeignFieldNames)
+ preloadMap[toString(foreignValues)] = append(preloadMap[toString(foreignValues)], result)
+ }
+
+ for j := 0; j < indirectScopeValue.Len(); j++ {
+ object := indirect(indirectScopeValue.Index(j))
+ objectRealValue := getValueFromFields(object, relation.AssociationForeignFieldNames)
+ f := object.FieldByName(field.Name)
+ if results, ok := preloadMap[toString(objectRealValue)]; ok {
+ f.Set(reflect.Append(f, results...))
+ } else {
+ f.Set(reflect.MakeSlice(f.Type(), 0, 0))
+ }
+ }
+ } else {
+ scope.Err(field.Set(resultsValue))
+ }
+}
+
+// handleBelongsToPreload is used to preload belongs to associations
+func (scope *Scope) handleBelongsToPreload(field *Field, conditions []interface{}) {
+ relation := field.Relationship
+
+ // preload conditions
+ preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
+
+ // get the relation's primary keys
+ primaryKeys := scope.getColumnAsArray(relation.ForeignFieldNames, scope.Value)
+ if len(primaryKeys) == 0 {
+ return
+ }
+
+ // find relations
+ results := makeSlice(field.Struct.Type)
+ scope.Err(preloadDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.AssociationForeignDBNames), toQueryMarks(primaryKeys)), toQueryValues(primaryKeys)...).Find(results, preloadConditions...).Error)
+
+ // assign find results
+ var (
+ resultsValue = indirect(reflect.ValueOf(results))
+ indirectScopeValue = scope.IndirectValue()
+ )
+
+ foreignFieldToObjects := make(map[string][]*reflect.Value)
+ if indirectScopeValue.Kind() == reflect.Slice {
+ for j := 0; j < indirectScopeValue.Len(); j++ {
+ object := indirect(indirectScopeValue.Index(j))
+ valueString := toString(getValueFromFields(object, relation.ForeignFieldNames))
+ foreignFieldToObjects[valueString] = append(foreignFieldToObjects[valueString], &object)
+ }
+ }
+
+ for i := 0; i < resultsValue.Len(); i++ {
+ result := resultsValue.Index(i)
+ if indirectScopeValue.Kind() == reflect.Slice {
+ valueString := toString(getValueFromFields(result, relation.AssociationForeignFieldNames))
+ if objects, found := foreignFieldToObjects[valueString]; found {
+ for _, object := range objects {
+ object.FieldByName(field.Name).Set(result)
+ }
+ }
+ } else {
+ scope.Err(field.Set(result))
+ }
+ }
+}
+
+// handleManyToManyPreload is used to preload many to many associations
+func (scope *Scope) handleManyToManyPreload(field *Field, conditions []interface{}) {
+ var (
+ relation = field.Relationship
+ joinTableHandler = relation.JoinTableHandler
+ fieldType = field.Struct.Type.Elem()
+ foreignKeyValue interface{}
+ foreignKeyType = reflect.ValueOf(&foreignKeyValue).Type()
+ linkHash = map[string][]reflect.Value{}
+ isPtr bool
+ )
+
+ if fieldType.Kind() == reflect.Ptr {
+ isPtr = true
+ fieldType = fieldType.Elem()
+ }
+
+ var sourceKeys = []string{}
+ for _, key := range joinTableHandler.SourceForeignKeys() {
+ sourceKeys = append(sourceKeys, key.DBName)
+ }
+
+ // preload conditions
+ preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
+
+ // generate query with join table
+ newScope := scope.New(reflect.New(fieldType).Interface())
+ preloadDB = preloadDB.Table(newScope.TableName()).Model(newScope.Value)
+
+ if len(preloadDB.search.selects) == 0 {
+ preloadDB = preloadDB.Select("*")
+ }
+
+ preloadDB = joinTableHandler.JoinWith(joinTableHandler, preloadDB, scope.Value)
+
+ // preload inline conditions
+ if len(preloadConditions) > 0 {
+ preloadDB = preloadDB.Where(preloadConditions[0], preloadConditions[1:]...)
+ }
+
+ rows, err := preloadDB.Rows()
+
+ if scope.Err(err) != nil {
+ return
+ }
+ defer rows.Close()
+
+ columns, _ := rows.Columns()
+ for rows.Next() {
+ var (
+ elem = reflect.New(fieldType).Elem()
+ fields = scope.New(elem.Addr().Interface()).Fields()
+ )
+
+ // register foreign keys in join tables
+ var joinTableFields []*Field
+ for _, sourceKey := range sourceKeys {
+ joinTableFields = append(joinTableFields, &Field{StructField: &StructField{DBName: sourceKey, IsNormal: true}, Field: reflect.New(foreignKeyType).Elem()})
+ }
+
+ scope.scan(rows, columns, append(fields, joinTableFields...))
+
+ scope.New(elem.Addr().Interface()).
+ InstanceSet("gorm:skip_query_callback", true).
+ callCallbacks(scope.db.parent.callbacks.queries)
+
+ var foreignKeys = make([]interface{}, len(sourceKeys))
+ // generate hashed foreign keys in the join table
+ for idx, joinTableField := range joinTableFields {
+ if !joinTableField.Field.IsNil() {
+ foreignKeys[idx] = joinTableField.Field.Elem().Interface()
+ }
+ }
+ hashedSourceKeys := toString(foreignKeys)
+
+ if isPtr {
+ linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem.Addr())
+ } else {
+ linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem)
+ }
+ }
+
+ if err := rows.Err(); err != nil {
+ scope.Err(err)
+ }
+
+ // assign find results
+ var (
+ indirectScopeValue = scope.IndirectValue()
+ fieldsSourceMap = map[string][]reflect.Value{}
+ foreignFieldNames = []string{}
+ )
+
+ for _, dbName := range relation.ForeignFieldNames {
+ if field, ok := scope.FieldByName(dbName); ok {
+ foreignFieldNames = append(foreignFieldNames, field.Name)
+ }
+ }
+
+ if indirectScopeValue.Kind() == reflect.Slice {
+ for j := 0; j < indirectScopeValue.Len(); j++ {
+ object := indirect(indirectScopeValue.Index(j))
+ key := toString(getValueFromFields(object, foreignFieldNames))
+ fieldsSourceMap[key] = append(fieldsSourceMap[key], object.FieldByName(field.Name))
+ }
+ } else if indirectScopeValue.IsValid() {
+ key := toString(getValueFromFields(indirectScopeValue, foreignFieldNames))
+ fieldsSourceMap[key] = append(fieldsSourceMap[key], indirectScopeValue.FieldByName(field.Name))
+ }
+
+ for source, fields := range fieldsSourceMap {
+ for _, f := range fields {
+ // If not 0, this means Value is a pointer and we already added preloaded models to it
+ if f.Len() != 0 {
+ continue
+ }
+
+ v := reflect.MakeSlice(f.Type(), 0, 0)
+ if len(linkHash[source]) > 0 {
+ v = reflect.Append(f, linkHash[source]...)
+ }
+
+ f.Set(v)
+ }
+ }
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback_row_query.go b/vendor/github.com/jinzhu/gorm/callback_row_query.go
new file mode 100644
index 0000000..323b160
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/callback_row_query.go
@@ -0,0 +1,41 @@
+package gorm
+
+import (
+ "database/sql"
+ "fmt"
+)
+
+// Define callbacks for row query
+func init() {
+ DefaultCallback.RowQuery().Register("gorm:row_query", rowQueryCallback)
+}
+
+type RowQueryResult struct {
+ Row *sql.Row
+}
+
+type RowsQueryResult struct {
+ Rows *sql.Rows
+ Error error
+}
+
+// rowQueryCallback is used to query data from the database
+func rowQueryCallback(scope *Scope) {
+ if result, ok := scope.InstanceGet("row_query_result"); ok {
+ scope.prepareQuerySQL()
+
+ if str, ok := scope.Get("gorm:query_hint"); ok {
+ scope.SQL = fmt.Sprint(str) + scope.SQL
+ }
+
+ if str, ok := scope.Get("gorm:query_option"); ok {
+ scope.SQL += addExtraSpaceIfExist(fmt.Sprint(str))
+ }
+
+ if rowResult, ok := result.(*RowQueryResult); ok {
+ rowResult.Row = scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...)
+ } else if rowsResult, ok := result.(*RowsQueryResult); ok {
+ rowsResult.Rows, rowsResult.Error = scope.SQLDB().Query(scope.SQL, scope.SQLVars...)
+ }
+ }
+}
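+
+// Illustrative usage sketch (not part of the upstream file): this callback is
+// exercised by raw row queries; assuming an initialized *gorm.DB named db and
+// a hypothetical users table,
+//
+//	var count int
+//	db.Raw("SELECT count(*) FROM users").Row().Scan(&count)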
diff --git a/vendor/github.com/jinzhu/gorm/callback_save.go b/vendor/github.com/jinzhu/gorm/callback_save.go
new file mode 100644
index 0000000..3b4e058
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/callback_save.go
@@ -0,0 +1,170 @@
+package gorm
+
+import (
+ "reflect"
+ "strings"
+)
+
+func beginTransactionCallback(scope *Scope) {
+ scope.Begin()
+}
+
+func commitOrRollbackTransactionCallback(scope *Scope) {
+ scope.CommitOrRollback()
+}
+
+func saveAssociationCheck(scope *Scope, field *Field) (autoUpdate bool, autoCreate bool, saveReference bool, r *Relationship) {
+ checkTruth := func(value interface{}) bool {
+ if v, ok := value.(bool); ok && !v {
+ return false
+ }
+
+ if v, ok := value.(string); ok {
+ v = strings.ToLower(v)
+ return v == "true"
+ }
+
+ return true
+ }
+
+ if scope.changeableField(field) && !field.IsBlank && !field.IsIgnored {
+ if r = field.Relationship; r != nil {
+ autoUpdate, autoCreate, saveReference = true, true, true
+
+ if value, ok := scope.Get("gorm:save_associations"); ok {
+ autoUpdate = checkTruth(value)
+ autoCreate = autoUpdate
+ saveReference = autoUpdate
+ } else if value, ok := field.TagSettingsGet("SAVE_ASSOCIATIONS"); ok {
+ autoUpdate = checkTruth(value)
+ autoCreate = autoUpdate
+ saveReference = autoUpdate
+ }
+
+ if value, ok := scope.Get("gorm:association_autoupdate"); ok {
+ autoUpdate = checkTruth(value)
+ } else if value, ok := field.TagSettingsGet("ASSOCIATION_AUTOUPDATE"); ok {
+ autoUpdate = checkTruth(value)
+ }
+
+ if value, ok := scope.Get("gorm:association_autocreate"); ok {
+ autoCreate = checkTruth(value)
+ } else if value, ok := field.TagSettingsGet("ASSOCIATION_AUTOCREATE"); ok {
+ autoCreate = checkTruth(value)
+ }
+
+ if value, ok := scope.Get("gorm:association_save_reference"); ok {
+ saveReference = checkTruth(value)
+ } else if value, ok := field.TagSettingsGet("ASSOCIATION_SAVE_REFERENCE"); ok {
+ saveReference = checkTruth(value)
+ }
+ }
+ }
+
+ return
+}
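+
+// Illustrative sketch (assumptions: an initialized *gorm.DB named db and a
+// model variable named user): the settings read above can be supplied per
+// statement or per field, e.g.
+//
+//	db.Set("gorm:association_autoupdate", false).Save(&user)
+//
+// or via a struct tag such as `gorm:"association_autoupdate:false"`.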
+
+func saveBeforeAssociationsCallback(scope *Scope) {
+ for _, field := range scope.Fields() {
+ autoUpdate, autoCreate, saveReference, relationship := saveAssociationCheck(scope, field)
+
+ if relationship != nil && relationship.Kind == "belongs_to" {
+ fieldValue := field.Field.Addr().Interface()
+ newScope := scope.New(fieldValue)
+
+ if newScope.PrimaryKeyZero() {
+ if autoCreate {
+ scope.Err(scope.NewDB().Save(fieldValue).Error)
+ }
+ } else if autoUpdate {
+ scope.Err(scope.NewDB().Save(fieldValue).Error)
+ }
+
+ if saveReference {
+ if len(relationship.ForeignFieldNames) != 0 {
+ // set value's foreign key
+ for idx, fieldName := range relationship.ForeignFieldNames {
+ associationForeignName := relationship.AssociationForeignDBNames[idx]
+ if foreignField, ok := scope.New(fieldValue).FieldByName(associationForeignName); ok {
+ scope.Err(scope.SetColumn(fieldName, foreignField.Field.Interface()))
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func saveAfterAssociationsCallback(scope *Scope) {
+ for _, field := range scope.Fields() {
+ autoUpdate, autoCreate, saveReference, relationship := saveAssociationCheck(scope, field)
+
+ if relationship != nil && (relationship.Kind == "has_one" || relationship.Kind == "has_many" || relationship.Kind == "many_to_many") {
+ value := field.Field
+
+ switch value.Kind() {
+ case reflect.Slice:
+ for i := 0; i < value.Len(); i++ {
+ newDB := scope.NewDB()
+ elem := value.Index(i).Addr().Interface()
+ newScope := newDB.NewScope(elem)
+
+ if saveReference {
+ if relationship.JoinTableHandler == nil && len(relationship.ForeignFieldNames) != 0 {
+ for idx, fieldName := range relationship.ForeignFieldNames {
+ associationForeignName := relationship.AssociationForeignDBNames[idx]
+ if f, ok := scope.FieldByName(associationForeignName); ok {
+ scope.Err(newScope.SetColumn(fieldName, f.Field.Interface()))
+ }
+ }
+ }
+
+ if relationship.PolymorphicType != "" {
+ scope.Err(newScope.SetColumn(relationship.PolymorphicType, relationship.PolymorphicValue))
+ }
+ }
+
+ if newScope.PrimaryKeyZero() {
+ if autoCreate {
+ scope.Err(newDB.Save(elem).Error)
+ }
+ } else if autoUpdate {
+ scope.Err(newDB.Save(elem).Error)
+ }
+
+ if !scope.New(newScope.Value).PrimaryKeyZero() && saveReference {
+ if joinTableHandler := relationship.JoinTableHandler; joinTableHandler != nil {
+ scope.Err(joinTableHandler.Add(joinTableHandler, newDB, scope.Value, newScope.Value))
+ }
+ }
+ }
+ default:
+ elem := value.Addr().Interface()
+ newScope := scope.New(elem)
+
+ if saveReference {
+ if len(relationship.ForeignFieldNames) != 0 {
+ for idx, fieldName := range relationship.ForeignFieldNames {
+ associationForeignName := relationship.AssociationForeignDBNames[idx]
+ if f, ok := scope.FieldByName(associationForeignName); ok {
+ scope.Err(newScope.SetColumn(fieldName, f.Field.Interface()))
+ }
+ }
+ }
+
+ if relationship.PolymorphicType != "" {
+ scope.Err(newScope.SetColumn(relationship.PolymorphicType, relationship.PolymorphicValue))
+ }
+ }
+
+ if newScope.PrimaryKeyZero() {
+ if autoCreate {
+ scope.Err(scope.NewDB().Save(elem).Error)
+ }
+ } else if autoUpdate {
+ scope.Err(scope.NewDB().Save(elem).Error)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback_update.go b/vendor/github.com/jinzhu/gorm/callback_update.go
new file mode 100644
index 0000000..699e534
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/callback_update.go
@@ -0,0 +1,121 @@
+package gorm
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Define callbacks for updating
+func init() {
+ DefaultCallback.Update().Register("gorm:assign_updating_attributes", assignUpdatingAttributesCallback)
+ DefaultCallback.Update().Register("gorm:begin_transaction", beginTransactionCallback)
+ DefaultCallback.Update().Register("gorm:before_update", beforeUpdateCallback)
+ DefaultCallback.Update().Register("gorm:save_before_associations", saveBeforeAssociationsCallback)
+ DefaultCallback.Update().Register("gorm:update_time_stamp", updateTimeStampForUpdateCallback)
+ DefaultCallback.Update().Register("gorm:update", updateCallback)
+ DefaultCallback.Update().Register("gorm:save_after_associations", saveAfterAssociationsCallback)
+ DefaultCallback.Update().Register("gorm:after_update", afterUpdateCallback)
+ DefaultCallback.Update().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback)
+}
+
+// assignUpdatingAttributesCallback assign updating attributes to model
+func assignUpdatingAttributesCallback(scope *Scope) {
+ if attrs, ok := scope.InstanceGet("gorm:update_interface"); ok {
+ if updateMaps, hasUpdate := scope.updatedAttrsWithValues(attrs); hasUpdate {
+ scope.InstanceSet("gorm:update_attrs", updateMaps)
+ } else {
+ scope.SkipLeft()
+ }
+ }
+}
+
+// beforeUpdateCallback will invoke `BeforeSave`, `BeforeUpdate` method before updating
+func beforeUpdateCallback(scope *Scope) {
+ if scope.DB().HasBlockGlobalUpdate() && !scope.hasConditions() {
+ scope.Err(errors.New("missing WHERE clause while updating"))
+ return
+ }
+ if _, ok := scope.Get("gorm:update_column"); !ok {
+ if !scope.HasError() {
+ scope.CallMethod("BeforeSave")
+ }
+ if !scope.HasError() {
+ scope.CallMethod("BeforeUpdate")
+ }
+ }
+}
+
+// updateTimeStampForUpdateCallback will set `UpdatedAt` when updating
+func updateTimeStampForUpdateCallback(scope *Scope) {
+ if _, ok := scope.Get("gorm:update_column"); !ok {
+ scope.SetColumn("UpdatedAt", scope.db.nowFunc())
+ }
+}
+
+// updateCallback the callback used to update data to database
+func updateCallback(scope *Scope) {
+ if !scope.HasError() {
+ var sqls []string
+
+ if updateAttrs, ok := scope.InstanceGet("gorm:update_attrs"); ok {
+ // Sort the column names so that the generated SQL is the same every time.
+ updateMap := updateAttrs.(map[string]interface{})
+ var columns []string
+ for c := range updateMap {
+ columns = append(columns, c)
+ }
+ sort.Strings(columns)
+
+ for _, column := range columns {
+ value := updateMap[column]
+ sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(column), scope.AddToVars(value)))
+ }
+ } else {
+ for _, field := range scope.Fields() {
+ if scope.changeableField(field) {
+ if !field.IsPrimaryKey && field.IsNormal && (field.Name != "CreatedAt" || !field.IsBlank) {
+ if !field.IsForeignKey || !field.IsBlank || !field.HasDefaultValue {
+ sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface())))
+ }
+ } else if relationship := field.Relationship; relationship != nil && relationship.Kind == "belongs_to" {
+ for _, foreignKey := range relationship.ForeignDBNames {
+ if foreignField, ok := scope.FieldByName(foreignKey); ok && !scope.changeableField(foreignField) {
+ sqls = append(sqls,
+ fmt.Sprintf("%v = %v", scope.Quote(foreignField.DBName), scope.AddToVars(foreignField.Field.Interface())))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ var extraOption string
+ if str, ok := scope.Get("gorm:update_option"); ok {
+ extraOption = fmt.Sprint(str)
+ }
+
+ if len(sqls) > 0 {
+ scope.Raw(fmt.Sprintf(
+ "UPDATE %v SET %v%v%v",
+ scope.QuotedTableName(),
+ strings.Join(sqls, ", "),
+ addExtraSpaceIfExist(scope.CombinedConditionSql()),
+ addExtraSpaceIfExist(extraOption),
+ )).Exec()
+ }
+ }
+}
+
+// afterUpdateCallback will invoke `AfterUpdate`, `AfterSave` method after updating
+func afterUpdateCallback(scope *Scope) {
+ if _, ok := scope.Get("gorm:update_column"); !ok {
+ if !scope.HasError() {
+ scope.CallMethod("AfterUpdate")
+ }
+ if !scope.HasError() {
+ scope.CallMethod("AfterSave")
+ }
+ }
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialect.go b/vendor/github.com/jinzhu/gorm/dialect.go
new file mode 100644
index 0000000..749587f
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/dialect.go
@@ -0,0 +1,147 @@
+package gorm
+
+import (
+ "database/sql"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// Dialect interface contains behaviors that differ across SQL databases
+type Dialect interface {
+ // GetName get dialect's name
+ GetName() string
+
+ // SetDB set db for dialect
+ SetDB(db SQLCommon)
+
+ // BindVar return the placeholder for actual values in SQL statements; in many dbs it is "?", while Postgres uses $1
+ BindVar(i int) string
+ // Quote quotes a field name to avoid SQL parsing exceptions caused by using a reserved word as a field name
+ Quote(key string) string
+ // DataTypeOf return data's sql type
+ DataTypeOf(field *StructField) string
+
+ // HasIndex check has index or not
+ HasIndex(tableName string, indexName string) bool
+ // HasForeignKey check has foreign key or not
+ HasForeignKey(tableName string, foreignKeyName string) bool
+ // RemoveIndex remove index
+ RemoveIndex(tableName string, indexName string) error
+ // HasTable check has table or not
+ HasTable(tableName string) bool
+ // HasColumn check has column or not
+ HasColumn(tableName string, columnName string) bool
+ // ModifyColumn modify column's type
+ ModifyColumn(tableName string, columnName string, typ string) error
+
+ // LimitAndOffsetSQL return generated SQL with Limit and Offset; mssql is a special case
+ LimitAndOffsetSQL(limit, offset interface{}) (string, error)
+ // SelectFromDummyTable return select values; for most dbs `SELECT <values>` just works, mysql needs `SELECT <values> FROM DUAL`
+ SelectFromDummyTable() string
+ // LastInsertIDOutputInterstitial most dbs support LastInsertId, but mssql needs to use `OUTPUT`
+ LastInsertIDOutputInterstitial(tableName, columnName string, columns []string) string
+ // LastInsertIDReturningSuffix most dbs support LastInsertId, but postgres needs to use `RETURNING`
+ LastInsertIDReturningSuffix(tableName, columnName string) string
+ // DefaultValueStr return the SQL used to insert a row when no column values are set, e.g. `DEFAULT VALUES`
+ DefaultValueStr() string
+
+ // BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference
+ BuildKeyName(kind, tableName string, fields ...string) string
+
+ // NormalizeIndexAndColumn returns valid index name and column name depending on each dialect
+ NormalizeIndexAndColumn(indexName, columnName string) (string, string)
+
+ // CurrentDatabase return current database name
+ CurrentDatabase() string
+}
+
+var dialectsMap = map[string]Dialect{}
+
+func newDialect(name string, db SQLCommon) Dialect {
+ if value, ok := dialectsMap[name]; ok {
+ dialect := reflect.New(reflect.TypeOf(value).Elem()).Interface().(Dialect)
+ dialect.SetDB(db)
+ return dialect
+ }
+
+ fmt.Printf("`%v` is not officially supported, running under compatibility mode.\n", name)
+ dialect := &commonDialect{}
+ dialect.SetDB(db)
+ return dialect
+}
+
+// RegisterDialect register new dialect
+func RegisterDialect(name string, dialect Dialect) {
+ dialectsMap[name] = dialect
+}
+
+// GetDialect gets the dialect for the specified dialect name
+func GetDialect(name string) (dialect Dialect, ok bool) {
+ dialect, ok = dialectsMap[name]
+ return
+}
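+
+// Illustrative lookup sketch from a caller's perspective (assumes the mysql
+// dialect registered by this package's init function):
+//
+//	if dialect, ok := gorm.GetDialect("mysql"); ok {
+//		fmt.Println(dialect.GetName()) // prints "mysql"
+//	}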
+
+// ParseFieldStructForDialect get field's sql data type
+var ParseFieldStructForDialect = func(field *StructField, dialect Dialect) (fieldValue reflect.Value, sqlType string, size int, additionalType string) {
+ // Get redirected field type
+ var (
+ reflectType = field.Struct.Type
+ dataType, _ = field.TagSettingsGet("TYPE")
+ )
+
+ for reflectType.Kind() == reflect.Ptr {
+ reflectType = reflectType.Elem()
+ }
+
+ // Get redirected field value
+ fieldValue = reflect.Indirect(reflect.New(reflectType))
+
+ if gormDataType, ok := fieldValue.Interface().(interface {
+ GormDataType(Dialect) string
+ }); ok {
+ dataType = gormDataType.GormDataType(dialect)
+ }
+
+ // Get scanner's real value
+ if dataType == "" {
+ var getScannerValue func(reflect.Value)
+ getScannerValue = func(value reflect.Value) {
+ fieldValue = value
+ if _, isScanner := reflect.New(fieldValue.Type()).Interface().(sql.Scanner); isScanner && fieldValue.Kind() == reflect.Struct {
+ getScannerValue(fieldValue.Field(0))
+ }
+ }
+ getScannerValue(fieldValue)
+ }
+
+ // Default Size
+ if num, ok := field.TagSettingsGet("SIZE"); ok {
+ size, _ = strconv.Atoi(num)
+ } else {
+ size = 255
+ }
+
+ // Default type from tag setting
+ notNull, _ := field.TagSettingsGet("NOT NULL")
+ unique, _ := field.TagSettingsGet("UNIQUE")
+ additionalType = notNull + " " + unique
+ if value, ok := field.TagSettingsGet("DEFAULT"); ok {
+ additionalType = additionalType + " DEFAULT " + value
+ }
+
+ if value, ok := field.TagSettingsGet("COMMENT"); ok {
+ additionalType = additionalType + " COMMENT " + value
+ }
+
+ return fieldValue, dataType, size, strings.TrimSpace(additionalType)
+}
+
+func currentDatabaseAndTable(dialect Dialect, tableName string) (string, string) {
+ if strings.Contains(tableName, ".") {
+ splitStrings := strings.SplitN(tableName, ".", 2)
+ return splitStrings[0], splitStrings[1]
+ }
+ return dialect.CurrentDatabase(), tableName
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialect_common.go b/vendor/github.com/jinzhu/gorm/dialect_common.go
new file mode 100644
index 0000000..d549510
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/dialect_common.go
@@ -0,0 +1,196 @@
+package gorm
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var keyNameRegex = regexp.MustCompile("[^a-zA-Z0-9]+")
+
+// DefaultForeignKeyNamer contains the default foreign key name generator method
+type DefaultForeignKeyNamer struct {
+}
+
+type commonDialect struct {
+ db SQLCommon
+ DefaultForeignKeyNamer
+}
+
+func init() {
+ RegisterDialect("common", &commonDialect{})
+}
+
+func (commonDialect) GetName() string {
+ return "common"
+}
+
+func (s *commonDialect) SetDB(db SQLCommon) {
+ s.db = db
+}
+
+func (commonDialect) BindVar(i int) string {
+ return "$$$" // ?
+}
+
+func (commonDialect) Quote(key string) string {
+ return fmt.Sprintf(`"%s"`, key)
+}
+
+func (s *commonDialect) fieldCanAutoIncrement(field *StructField) bool {
+ if value, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok {
+ return strings.ToLower(value) != "false"
+ }
+ return field.IsPrimaryKey
+}
+
+func (s *commonDialect) DataTypeOf(field *StructField) string {
+ var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
+
+ if sqlType == "" {
+ switch dataValue.Kind() {
+ case reflect.Bool:
+ sqlType = "BOOLEAN"
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
+ if s.fieldCanAutoIncrement(field) {
+ sqlType = "INTEGER AUTO_INCREMENT"
+ } else {
+ sqlType = "INTEGER"
+ }
+ case reflect.Int64, reflect.Uint64:
+ if s.fieldCanAutoIncrement(field) {
+ sqlType = "BIGINT AUTO_INCREMENT"
+ } else {
+ sqlType = "BIGINT"
+ }
+ case reflect.Float32, reflect.Float64:
+ sqlType = "FLOAT"
+ case reflect.String:
+ if size > 0 && size < 65532 {
+ sqlType = fmt.Sprintf("VARCHAR(%d)", size)
+ } else {
+ sqlType = "VARCHAR(65532)"
+ }
+ case reflect.Struct:
+ if _, ok := dataValue.Interface().(time.Time); ok {
+ sqlType = "TIMESTAMP"
+ }
+ default:
+ if _, ok := dataValue.Interface().([]byte); ok {
+ if size > 0 && size < 65532 {
+ sqlType = fmt.Sprintf("BINARY(%d)", size)
+ } else {
+ sqlType = "BINARY(65532)"
+ }
+ }
+ }
+ }
+
+ if sqlType == "" {
+ panic(fmt.Sprintf("invalid sql type %s (%s) for commonDialect", dataValue.Type().Name(), dataValue.Kind().String()))
+ }
+
+ if strings.TrimSpace(additionalType) == "" {
+ return sqlType
+ }
+ return fmt.Sprintf("%v %v", sqlType, additionalType)
+}
+
+func (s commonDialect) HasIndex(tableName string, indexName string) bool {
+ var count int
+ currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+ s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE table_schema = ? AND table_name = ? AND index_name = ?", currentDatabase, tableName, indexName).Scan(&count)
+ return count > 0
+}
+
+func (s commonDialect) RemoveIndex(tableName string, indexName string) error {
+ _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v", indexName))
+ return err
+}
+
+func (s commonDialect) HasForeignKey(tableName string, foreignKeyName string) bool {
+ return false
+}
+
+func (s commonDialect) HasTable(tableName string) bool {
+ var count int
+ currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+ s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = ? AND table_name = ?", currentDatabase, tableName).Scan(&count)
+ return count > 0
+}
+
+func (s commonDialect) HasColumn(tableName string, columnName string) bool {
+ var count int
+ currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+ s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? AND table_name = ? AND column_name = ?", currentDatabase, tableName, columnName).Scan(&count)
+ return count > 0
+}
+
+func (s commonDialect) ModifyColumn(tableName string, columnName string, typ string) error {
+ _, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v ALTER COLUMN %v TYPE %v", tableName, columnName, typ))
+ return err
+}
+
+func (s commonDialect) CurrentDatabase() (name string) {
+ s.db.QueryRow("SELECT DATABASE()").Scan(&name)
+ return
+}
+
+// LimitAndOffsetSQL return generated SQL with Limit and Offset
+func (s commonDialect) LimitAndOffsetSQL(limit, offset interface{}) (sql string, err error) {
+ if limit != nil {
+ if parsedLimit, err := s.parseInt(limit); err != nil {
+ return "", err
+ } else if parsedLimit >= 0 {
+ sql += fmt.Sprintf(" LIMIT %d", parsedLimit)
+ }
+ }
+ if offset != nil {
+ if parsedOffset, err := s.parseInt(offset); err != nil {
+ return "", err
+ } else if parsedOffset >= 0 {
+ sql += fmt.Sprintf(" OFFSET %d", parsedOffset)
+ }
+ }
+ return
+}
+
+func (commonDialect) SelectFromDummyTable() string {
+ return ""
+}
+
+func (commonDialect) LastInsertIDOutputInterstitial(tableName, columnName string, columns []string) string {
+ return ""
+}
+
+func (commonDialect) LastInsertIDReturningSuffix(tableName, columnName string) string {
+ return ""
+}
+
+func (commonDialect) DefaultValueStr() string {
+ return "DEFAULT VALUES"
+}
+
+// BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference
+func (DefaultForeignKeyNamer) BuildKeyName(kind, tableName string, fields ...string) string {
+ keyName := fmt.Sprintf("%s_%s_%s", kind, tableName, strings.Join(fields, "_"))
+ keyName = keyNameRegex.ReplaceAllString(keyName, "_")
+ return keyName
+}
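+
+// For example, BuildKeyName("idx", "user profiles", "name") yields
+// "idx_user_profiles_name": the parts are joined with "_" and any run of
+// non-alphanumeric characters is collapsed to a single "_".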
+
+// NormalizeIndexAndColumn returns the given index name and column name unchanged
+func (commonDialect) NormalizeIndexAndColumn(indexName, columnName string) (string, string) {
+ return indexName, columnName
+}
+
+func (commonDialect) parseInt(value interface{}) (int64, error) {
+ return strconv.ParseInt(fmt.Sprint(value), 0, 0)
+}
+
+// IsByteArrayOrSlice returns true if the reflected value is a byte array or slice
+func IsByteArrayOrSlice(value reflect.Value) bool {
+ return (value.Kind() == reflect.Array || value.Kind() == reflect.Slice) && value.Type().Elem() == reflect.TypeOf(uint8(0))
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialect_mysql.go b/vendor/github.com/jinzhu/gorm/dialect_mysql.go
new file mode 100644
index 0000000..b4467ff
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/dialect_mysql.go
@@ -0,0 +1,246 @@
+package gorm
+
+import (
+ "crypto/sha1"
+ "database/sql"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+var mysqlIndexRegex = regexp.MustCompile(`^(.+)\((\d+)\)$`)
+
+type mysql struct {
+ commonDialect
+}
+
+func init() {
+ RegisterDialect("mysql", &mysql{})
+}
+
+func (mysql) GetName() string {
+ return "mysql"
+}
+
+func (mysql) Quote(key string) string {
+ return fmt.Sprintf("`%s`", key)
+}
+
+// Get Data Type for MySQL Dialect
+func (s *mysql) DataTypeOf(field *StructField) string {
+ var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
+
+ // MySQL allows only one auto increment column per table, and it must
+ // be a KEY column.
+ if _, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok {
+ if _, ok = field.TagSettingsGet("INDEX"); !ok && !field.IsPrimaryKey {
+ field.TagSettingsDelete("AUTO_INCREMENT")
+ }
+ }
+
+ if sqlType == "" {
+ switch dataValue.Kind() {
+ case reflect.Bool:
+ sqlType = "boolean"
+ case reflect.Int8:
+ if s.fieldCanAutoIncrement(field) {
+ field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+ sqlType = "tinyint AUTO_INCREMENT"
+ } else {
+ sqlType = "tinyint"
+ }
+ case reflect.Int, reflect.Int16, reflect.Int32:
+ if s.fieldCanAutoIncrement(field) {
+ field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+ sqlType = "int AUTO_INCREMENT"
+ } else {
+ sqlType = "int"
+ }
+ case reflect.Uint8:
+ if s.fieldCanAutoIncrement(field) {
+ field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+ sqlType = "tinyint unsigned AUTO_INCREMENT"
+ } else {
+ sqlType = "tinyint unsigned"
+ }
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
+ if s.fieldCanAutoIncrement(field) {
+ field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+ sqlType = "int unsigned AUTO_INCREMENT"
+ } else {
+ sqlType = "int unsigned"
+ }
+ case reflect.Int64:
+ if s.fieldCanAutoIncrement(field) {
+ field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+ sqlType = "bigint AUTO_INCREMENT"
+ } else {
+ sqlType = "bigint"
+ }
+ case reflect.Uint64:
+ if s.fieldCanAutoIncrement(field) {
+ field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+ sqlType = "bigint unsigned AUTO_INCREMENT"
+ } else {
+ sqlType = "bigint unsigned"
+ }
+ case reflect.Float32, reflect.Float64:
+ sqlType = "double"
+ case reflect.String:
+ if size > 0 && size < 65532 {
+ sqlType = fmt.Sprintf("varchar(%d)", size)
+ } else {
+ sqlType = "longtext"
+ }
+ case reflect.Struct:
+ if _, ok := dataValue.Interface().(time.Time); ok {
+ precision := ""
+ if p, ok := field.TagSettingsGet("PRECISION"); ok {
+ precision = fmt.Sprintf("(%s)", p)
+ }
+
+ if _, ok := field.TagSettings["NOT NULL"]; ok || field.IsPrimaryKey {
+ sqlType = fmt.Sprintf("DATETIME%v", precision)
+ } else {
+ sqlType = fmt.Sprintf("DATETIME%v NULL", precision)
+ }
+ }
+ default:
+ if IsByteArrayOrSlice(dataValue) {
+ if size > 0 && size < 65532 {
+ sqlType = fmt.Sprintf("varbinary(%d)", size)
+ } else {
+ sqlType = "longblob"
+ }
+ }
+ }
+ }
+
+ if sqlType == "" {
+ panic(fmt.Sprintf("invalid sql type %s (%s) in field %s for mysql", dataValue.Type().Name(), dataValue.Kind().String(), field.Name))
+ }
+
+ if strings.TrimSpace(additionalType) == "" {
+ return sqlType
+ }
+ return fmt.Sprintf("%v %v", sqlType, additionalType)
+}
+
+func (s mysql) RemoveIndex(tableName string, indexName string) error {
+ _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v ON %v", indexName, s.Quote(tableName)))
+ return err
+}
+
+func (s mysql) ModifyColumn(tableName string, columnName string, typ string) error {
+ _, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v MODIFY COLUMN %v %v", tableName, columnName, typ))
+ return err
+}
+
+func (s mysql) LimitAndOffsetSQL(limit, offset interface{}) (sql string, err error) {
+ if limit != nil {
+ parsedLimit, err := s.parseInt(limit)
+ if err != nil {
+ return "", err
+ }
+ if parsedLimit >= 0 {
+ sql += fmt.Sprintf(" LIMIT %d", parsedLimit)
+
+ if offset != nil {
+ parsedOffset, err := s.parseInt(offset)
+ if err != nil {
+ return "", err
+ }
+ if parsedOffset >= 0 {
+ sql += fmt.Sprintf(" OFFSET %d", parsedOffset)
+ }
+ }
+ }
+ }
+ return
+}
+
+func (s mysql) HasForeignKey(tableName string, foreignKeyName string) bool {
+ var count int
+ currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+ s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_SCHEMA=? AND TABLE_NAME=? AND CONSTRAINT_NAME=? AND CONSTRAINT_TYPE='FOREIGN KEY'", currentDatabase, tableName, foreignKeyName).Scan(&count)
+ return count > 0
+}
+
+func (s mysql) HasTable(tableName string) bool {
+ currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+ var name string
+ // allow mysql database name with '-' character
+ if err := s.db.QueryRow(fmt.Sprintf("SHOW TABLES FROM `%s` WHERE `Tables_in_%s` = ?", currentDatabase, currentDatabase), tableName).Scan(&name); err != nil {
+ if err == sql.ErrNoRows {
+ return false
+ }
+ panic(err)
+ } else {
+ return true
+ }
+}
+
+func (s mysql) HasIndex(tableName string, indexName string) bool {
+ currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+ if rows, err := s.db.Query(fmt.Sprintf("SHOW INDEXES FROM `%s` FROM `%s` WHERE Key_name = ?", tableName, currentDatabase), indexName); err != nil {
+ panic(err)
+ } else {
+ defer rows.Close()
+ return rows.Next()
+ }
+}
+
+func (s mysql) HasColumn(tableName string, columnName string) bool {
+ currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
+ if rows, err := s.db.Query(fmt.Sprintf("SHOW COLUMNS FROM `%s` FROM `%s` WHERE Field = ?", tableName, currentDatabase), columnName); err != nil {
+ panic(err)
+ } else {
+ defer rows.Close()
+ return rows.Next()
+ }
+}
+
+func (s mysql) CurrentDatabase() (name string) {
+ s.db.QueryRow("SELECT DATABASE()").Scan(&name)
+ return
+}
+
+func (mysql) SelectFromDummyTable() string {
+ return "FROM DUAL"
+}
+
+func (s mysql) BuildKeyName(kind, tableName string, fields ...string) string {
+ keyName := s.commonDialect.BuildKeyName(kind, tableName, fields...)
+ if utf8.RuneCountInString(keyName) <= 64 {
+ return keyName
+ }
+ h := sha1.New()
+ h.Write([]byte(keyName))
+ bs := h.Sum(nil)
+
+ // the sha1 hex digest is 40 characters; keep at most the first 24 runes of the first field so the key stays within 64 characters
+ destRunes := []rune(keyNameRegex.ReplaceAllString(fields[0], "_"))
+ if len(destRunes) > 24 {
+ destRunes = destRunes[:24]
+ }
+
+ return fmt.Sprintf("%s%x", string(destRunes), bs)
+}
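+
+// For example, a generated key name longer than 64 runes is replaced by at
+// most the first 24 sanitized runes of the first field followed by the
+// 40-character sha1 hex digest of the full name, keeping it within MySQL's
+// 64-character identifier limit.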
+
+// NormalizeIndexAndColumn returns the index name and column name, moving an index prefix length from the index name onto the column if one is specified
+func (mysql) NormalizeIndexAndColumn(indexName, columnName string) (string, string) {
+ submatch := mysqlIndexRegex.FindStringSubmatch(indexName)
+ if len(submatch) != 3 {
+ return indexName, columnName
+ }
+ indexName = submatch[1]
+ columnName = fmt.Sprintf("%s(%s)", columnName, submatch[2])
+ return indexName, columnName
+}
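+
+// For example, an index name of "idx_name(10)" with column "name" is
+// normalized to ("idx_name", "name(10)"), moving the prefix length onto the
+// column definition.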
+
+func (mysql) DefaultValueStr() string {
+ return "VALUES()"
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialect_postgres.go b/vendor/github.com/jinzhu/gorm/dialect_postgres.go
new file mode 100644
index 0000000..d2df313
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/dialect_postgres.go
@@ -0,0 +1,147 @@
+package gorm
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+)
+
+type postgres struct {
+ commonDialect
+}
+
+func init() {
+ RegisterDialect("postgres", &postgres{})
+ RegisterDialect("cloudsqlpostgres", &postgres{})
+}
+
+func (postgres) GetName() string {
+ return "postgres"
+}
+
+func (postgres) BindVar(i int) string {
+ return fmt.Sprintf("$%v", i)
+}
+
+func (s *postgres) DataTypeOf(field *StructField) string {
+ var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
+
+ if sqlType == "" {
+ switch dataValue.Kind() {
+ case reflect.Bool:
+ sqlType = "boolean"
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uintptr:
+ if s.fieldCanAutoIncrement(field) {
+ field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+ sqlType = "serial"
+ } else {
+ sqlType = "integer"
+ }
+ case reflect.Int64, reflect.Uint32, reflect.Uint64:
+ if s.fieldCanAutoIncrement(field) {
+ field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+ sqlType = "bigserial"
+ } else {
+ sqlType = "bigint"
+ }
+ case reflect.Float32, reflect.Float64:
+ sqlType = "numeric"
+ case reflect.String:
+ if _, ok := field.TagSettingsGet("SIZE"); !ok {
+ size = 0 // if SIZE hasn't been set, use `text` as the default type, as there is no performance difference
+ }
+
+ if size > 0 && size < 65532 {
+ sqlType = fmt.Sprintf("varchar(%d)", size)
+ } else {
+ sqlType = "text"
+ }
+ case reflect.Struct:
+ if _, ok := dataValue.Interface().(time.Time); ok {
+ sqlType = "timestamp with time zone"
+ }
+ case reflect.Map:
+ if dataValue.Type().Name() == "Hstore" {
+ sqlType = "hstore"
+ }
+ default:
+ if IsByteArrayOrSlice(dataValue) {
+ sqlType = "bytea"
+
+ if isUUID(dataValue) {
+ sqlType = "uuid"
+ }
+
+ if isJSON(dataValue) {
+ sqlType = "jsonb"
+ }
+ }
+ }
+ }
+
+ if sqlType == "" {
+ panic(fmt.Sprintf("invalid sql type %s (%s) for postgres", dataValue.Type().Name(), dataValue.Kind().String()))
+ }
+
+ if strings.TrimSpace(additionalType) == "" {
+ return sqlType
+ }
+ return fmt.Sprintf("%v %v", sqlType, additionalType)
+}
+
+func (s postgres) HasIndex(tableName string, indexName string) bool {
+ var count int
+ s.db.QueryRow("SELECT count(*) FROM pg_indexes WHERE tablename = $1 AND indexname = $2 AND schemaname = CURRENT_SCHEMA()", tableName, indexName).Scan(&count)
+ return count > 0
+}
+
+func (s postgres) HasForeignKey(tableName string, foreignKeyName string) bool {
+ var count int
+ s.db.QueryRow("SELECT count(con.conname) FROM pg_constraint con WHERE $1::regclass::oid = con.conrelid AND con.conname = $2 AND con.contype='f'", tableName, foreignKeyName).Scan(&count)
+ return count > 0
+}
+
+func (s postgres) HasTable(tableName string) bool {
+ var count int
+ s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_name = $1 AND table_type = 'BASE TABLE' AND table_schema = CURRENT_SCHEMA()", tableName).Scan(&count)
+ return count > 0
+}
+
+func (s postgres) HasColumn(tableName string, columnName string) bool {
+ var count int
+ s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.columns WHERE table_name = $1 AND column_name = $2 AND table_schema = CURRENT_SCHEMA()", tableName, columnName).Scan(&count)
+ return count > 0
+}
+
+func (s postgres) CurrentDatabase() (name string) {
+ s.db.QueryRow("SELECT CURRENT_DATABASE()").Scan(&name)
+ return
+}
+
+func (s postgres) LastInsertIDOutputInterstitial(tableName, key string, columns []string) string {
+ return ""
+}
+
+func (s postgres) LastInsertIDReturningSuffix(tableName, key string) string {
+ return fmt.Sprintf("RETURNING %v.%v", tableName, key)
+}
+
+func (postgres) SupportLastInsertID() bool {
+ return false
+}
+
+func isUUID(value reflect.Value) bool {
+ if value.Kind() != reflect.Array || value.Type().Len() != 16 {
+ return false
+ }
+ typename := value.Type().Name()
+ lower := strings.ToLower(typename)
+ return "uuid" == lower || "guid" == lower
+}
+
+func isJSON(value reflect.Value) bool {
+ _, ok := value.Interface().(json.RawMessage)
+ return ok
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go b/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go
new file mode 100644
index 0000000..5f96c36
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go
@@ -0,0 +1,107 @@
+package gorm
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+)
+
+type sqlite3 struct {
+ commonDialect
+}
+
+func init() {
+ RegisterDialect("sqlite3", &sqlite3{})
+}
+
+func (sqlite3) GetName() string {
+ return "sqlite3"
+}
+
+// Get Data Type for Sqlite Dialect
+func (s *sqlite3) DataTypeOf(field *StructField) string {
+ var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
+
+ if sqlType == "" {
+ switch dataValue.Kind() {
+ case reflect.Bool:
+ sqlType = "bool"
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
+ if s.fieldCanAutoIncrement(field) {
+ field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+ sqlType = "integer primary key autoincrement"
+ } else {
+ sqlType = "integer"
+ }
+ case reflect.Int64, reflect.Uint64:
+ if s.fieldCanAutoIncrement(field) {
+ field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
+ sqlType = "integer primary key autoincrement"
+ } else {
+ sqlType = "bigint"
+ }
+ case reflect.Float32, reflect.Float64:
+ sqlType = "real"
+ case reflect.String:
+ if size > 0 && size < 65532 {
+ sqlType = fmt.Sprintf("varchar(%d)", size)
+ } else {
+ sqlType = "text"
+ }
+ case reflect.Struct:
+ if _, ok := dataValue.Interface().(time.Time); ok {
+ sqlType = "datetime"
+ }
+ default:
+ if IsByteArrayOrSlice(dataValue) {
+ sqlType = "blob"
+ }
+ }
+ }
+
+ if sqlType == "" {
+ panic(fmt.Sprintf("invalid sql type %s (%s) for sqlite3", dataValue.Type().Name(), dataValue.Kind().String()))
+ }
+
+ if strings.TrimSpace(additionalType) == "" {
+ return sqlType
+ }
+ return fmt.Sprintf("%v %v", sqlType, additionalType)
+}
+
+func (s sqlite3) HasIndex(tableName string, indexName string) bool {
+ var count int
+ s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND sql LIKE '%%INDEX %v ON%%'", indexName), tableName).Scan(&count)
+ return count > 0
+}
+
+func (s sqlite3) HasTable(tableName string) bool {
+ var count int
+ s.db.QueryRow("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", tableName).Scan(&count)
+ return count > 0
+}
+
+func (s sqlite3) HasColumn(tableName string, columnName string) bool {
+ var count int
+ s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND (sql LIKE '%%\"%v\" %%' OR sql LIKE '%%%v %%');\n", columnName, columnName), tableName).Scan(&count)
+ return count > 0
+}
+
+func (s sqlite3) CurrentDatabase() (name string) {
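+ // PRAGMA database_list returns rows of (seq, name, file); the database name
+ // is in the second column, so scan three columns and read the second one.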
+ var (
+ ifaces = make([]interface{}, 3)
+ pointers = make([]*string, 3)
+ i int
+ )
+ for i = 0; i < 3; i++ {
+ ifaces[i] = &pointers[i]
+ }
+ if err := s.db.QueryRow("PRAGMA database_list").Scan(ifaces...); err != nil {
+ return
+ }
+ if pointers[1] != nil {
+ name = *pointers[1]
+ }
+ return
+}
diff --git a/vendor/github.com/jinzhu/gorm/docker-compose.yml b/vendor/github.com/jinzhu/gorm/docker-compose.yml
new file mode 100644
index 0000000..79bf5fc
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/docker-compose.yml
@@ -0,0 +1,30 @@
+version: '3'
+
+services:
+ mysql:
+ image: 'mysql:latest'
+ ports:
+ - 9910:3306
+ environment:
+ - MYSQL_DATABASE=gorm
+ - MYSQL_USER=gorm
+ - MYSQL_PASSWORD=gorm
+ - MYSQL_RANDOM_ROOT_PASSWORD="yes"
+ postgres:
+ image: 'postgres:latest'
+ ports:
+ - 9920:5432
+ environment:
+ - POSTGRES_USER=gorm
+ - POSTGRES_DB=gorm
+ - POSTGRES_PASSWORD=gorm
+ mssql:
+ image: 'mcmoe/mssqldocker:latest'
+ ports:
+ - 9930:1433
+ environment:
+ - ACCEPT_EULA=Y
+ - SA_PASSWORD=LoremIpsum86
+ - MSSQL_DB=gorm
+ - MSSQL_USER=gorm
+ - MSSQL_PASSWORD=LoremIpsum86
diff --git a/vendor/github.com/jinzhu/gorm/errors.go b/vendor/github.com/jinzhu/gorm/errors.go
new file mode 100644
index 0000000..d5ef8d5
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/errors.go
@@ -0,0 +1,72 @@
+package gorm
+
+import (
+ "errors"
+ "strings"
+)
+
+var (
+ // ErrRecordNotFound returns a "record not found error". Occurs only when attempting to query the database with a struct; querying with a slice won't return this error
+ ErrRecordNotFound = errors.New("record not found")
+ // ErrInvalidSQL occurs when you attempt a query with invalid SQL
+ ErrInvalidSQL = errors.New("invalid SQL")
+ // ErrInvalidTransaction occurs when you are trying to `Commit` or `Rollback` without a valid transaction
+ ErrInvalidTransaction = errors.New("no valid transaction")
+ // ErrCantStartTransaction is returned when a transaction can't be started with `Begin`
+ ErrCantStartTransaction = errors.New("can't start transaction")
+ // ErrUnaddressable is returned when an unaddressable value is used
+ ErrUnaddressable = errors.New("using unaddressable value")
+)
+
+// Errors contains all happened errors
+type Errors []error
+
+// IsRecordNotFoundError returns true if error contains a RecordNotFound error
+func IsRecordNotFoundError(err error) bool {
+ if errs, ok := err.(Errors); ok {
+ for _, err := range errs {
+ if err == ErrRecordNotFound {
+ return true
+ }
+ }
+ }
+ return err == ErrRecordNotFound
+}
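+
+// Illustrative usage sketch from a caller's perspective (assumes an
+// initialized *gorm.DB named db and a User model):
+//
+//	err := db.Where("name = ?", "jinzhu").First(&user).Error
+//	if gorm.IsRecordNotFoundError(err) {
+//		// no matching record
+//	}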
+
+// GetErrors gets all errors that have occurred and returns a slice of errors (Error type)
+func (errs Errors) GetErrors() []error {
+ return errs
+}
+
+// Add adds an error to a given slice of errors
+func (errs Errors) Add(newErrors ...error) Errors {
+ for _, err := range newErrors {
+ if err == nil {
+ continue
+ }
+
+ if errors, ok := err.(Errors); ok {
+ errs = errs.Add(errors...)
+ } else {
+ ok = true
+ for _, e := range errs {
+ if err == e {
+ ok = false
+ }
+ }
+ if ok {
+ errs = append(errs, err)
+ }
+ }
+ }
+ return errs
+}
+
+// Error takes a slice of all errors that have occurred and returns it as a formatted string
+func (errs Errors) Error() string {
+ var errors = []string{}
+ for _, e := range errs {
+ errors = append(errors, e.Error())
+ }
+ return strings.Join(errors, "; ")
+}
diff --git a/vendor/github.com/jinzhu/gorm/field.go b/vendor/github.com/jinzhu/gorm/field.go
new file mode 100644
index 0000000..acd06e2
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/field.go
@@ -0,0 +1,66 @@
+package gorm
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// Field model field definition
+type Field struct {
+ *StructField
+ IsBlank bool
+ Field reflect.Value
+}
+
+// Set set a value to the field
+func (field *Field) Set(value interface{}) (err error) {
+ if !field.Field.IsValid() {
+ return errors.New("field value not valid")
+ }
+
+ if !field.Field.CanAddr() {
+ return ErrUnaddressable
+ }
+
+ reflectValue, ok := value.(reflect.Value)
+ if !ok {
+ reflectValue = reflect.ValueOf(value)
+ }
+
+ fieldValue := field.Field
+ if reflectValue.IsValid() {
+ if reflectValue.Type().ConvertibleTo(fieldValue.Type()) {
+ fieldValue.Set(reflectValue.Convert(fieldValue.Type()))
+ } else {
+ if fieldValue.Kind() == reflect.Ptr {
+ if fieldValue.IsNil() {
+ fieldValue.Set(reflect.New(field.Struct.Type.Elem()))
+ }
+ fieldValue = fieldValue.Elem()
+ }
+
+ if reflectValue.Type().ConvertibleTo(fieldValue.Type()) {
+ fieldValue.Set(reflectValue.Convert(fieldValue.Type()))
+ } else if scanner, ok := fieldValue.Addr().Interface().(sql.Scanner); ok {
+ v := reflectValue.Interface()
+ if valuer, ok := v.(driver.Valuer); ok {
+ if v, err = valuer.Value(); err == nil {
+ err = scanner.Scan(v)
+ }
+ } else {
+ err = scanner.Scan(v)
+ }
+ } else {
+ err = fmt.Errorf("could not convert argument of field %s from %s to %s", field.Name, reflectValue.Type(), fieldValue.Type())
+ }
+ }
+ } else {
+ field.Field.Set(reflect.Zero(field.Field.Type()))
+ }
+
+ field.IsBlank = isBlank(field.Field)
+ return err
+}
diff --git a/vendor/github.com/jinzhu/gorm/interface.go b/vendor/github.com/jinzhu/gorm/interface.go
new file mode 100644
index 0000000..fe64923
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/interface.go
@@ -0,0 +1,24 @@
+package gorm
+
+import (
+ "context"
+ "database/sql"
+)
+
+// SQLCommon is the minimal database connection functionality gorm requires. Implemented by *sql.DB.
+type SQLCommon interface {
+ Exec(query string, args ...interface{}) (sql.Result, error)
+ Prepare(query string) (*sql.Stmt, error)
+ Query(query string, args ...interface{}) (*sql.Rows, error)
+ QueryRow(query string, args ...interface{}) *sql.Row
+}
+
+type sqlDb interface {
+ Begin() (*sql.Tx, error)
+ BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
+}
+
+type sqlTx interface {
+ Commit() error
+ Rollback() error
+}
diff --git a/vendor/github.com/jinzhu/gorm/join_table_handler.go b/vendor/github.com/jinzhu/gorm/join_table_handler.go
new file mode 100644
index 0000000..a036d46
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/join_table_handler.go
@@ -0,0 +1,211 @@
+package gorm
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// JoinTableHandlerInterface is an interface for how to handle many2many relations
+type JoinTableHandlerInterface interface {
+ // initialize join table handler
+ Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type)
+ // Table return join table's table name
+ Table(db *DB) string
+ // Add create relationship in join table for source and destination
+ Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error
+ // Delete delete relationship in join table for sources
+ Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error
+ // JoinWith query with `Join` conditions
+ JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB
+ // SourceForeignKeys return source foreign keys
+ SourceForeignKeys() []JoinTableForeignKey
+ // DestinationForeignKeys return destination foreign keys
+ DestinationForeignKeys() []JoinTableForeignKey
+}
+
+// JoinTableForeignKey join table foreign key struct
+type JoinTableForeignKey struct {
+ DBName string
+ AssociationDBName string
+}
+
+// JoinTableSource is a struct that contains model type and foreign keys
+type JoinTableSource struct {
+ ModelType reflect.Type
+ ForeignKeys []JoinTableForeignKey
+}
+
+// JoinTableHandler default join table handler
+type JoinTableHandler struct {
+ TableName string `sql:"-"`
+ Source JoinTableSource `sql:"-"`
+ Destination JoinTableSource `sql:"-"`
+}
+
+// SourceForeignKeys return source foreign keys
+func (s *JoinTableHandler) SourceForeignKeys() []JoinTableForeignKey {
+ return s.Source.ForeignKeys
+}
+
+// DestinationForeignKeys return destination foreign keys
+func (s *JoinTableHandler) DestinationForeignKeys() []JoinTableForeignKey {
+ return s.Destination.ForeignKeys
+}
+
+// Setup initialize a default join table handler
+func (s *JoinTableHandler) Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type) {
+ s.TableName = tableName
+
+ s.Source = JoinTableSource{ModelType: source}
+ s.Source.ForeignKeys = []JoinTableForeignKey{}
+ for idx, dbName := range relationship.ForeignFieldNames {
+ s.Source.ForeignKeys = append(s.Source.ForeignKeys, JoinTableForeignKey{
+ DBName: relationship.ForeignDBNames[idx],
+ AssociationDBName: dbName,
+ })
+ }
+
+ s.Destination = JoinTableSource{ModelType: destination}
+ s.Destination.ForeignKeys = []JoinTableForeignKey{}
+ for idx, dbName := range relationship.AssociationForeignFieldNames {
+ s.Destination.ForeignKeys = append(s.Destination.ForeignKeys, JoinTableForeignKey{
+ DBName: relationship.AssociationForeignDBNames[idx],
+ AssociationDBName: dbName,
+ })
+ }
+}
+
+// Table return join table's table name
+func (s JoinTableHandler) Table(db *DB) string {
+ return DefaultTableNameHandler(db, s.TableName)
+}
+
+func (s JoinTableHandler) updateConditionMap(conditionMap map[string]interface{}, db *DB, joinTableSources []JoinTableSource, sources ...interface{}) {
+ for _, source := range sources {
+ scope := db.NewScope(source)
+ modelType := scope.GetModelStruct().ModelType
+
+ for _, joinTableSource := range joinTableSources {
+ if joinTableSource.ModelType == modelType {
+ for _, foreignKey := range joinTableSource.ForeignKeys {
+ if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok {
+ conditionMap[foreignKey.DBName] = field.Field.Interface()
+ }
+ }
+ break
+ }
+ }
+ }
+}
+
+// Add create relationship in join table for source and destination
+func (s JoinTableHandler) Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error {
+ var (
+ scope = db.NewScope("")
+ conditionMap = map[string]interface{}{}
+ )
+
+ // Update condition map for source
+ s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Source}, source)
+
+ // Update condition map for destination
+ s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Destination}, destination)
+
+ var assignColumns, binVars, conditions []string
+ var values []interface{}
+ for key, value := range conditionMap {
+ assignColumns = append(assignColumns, scope.Quote(key))
+ binVars = append(binVars, `?`)
+ conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key)))
+ values = append(values, value)
+ }
+
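+ // The generated SQL below uses each condition value twice: once for the
+ // INSERT ... SELECT placeholders and once inside the WHERE NOT EXISTS
+ // clause, so the values slice is doubled here (range iterates over the
+ // original slice, so the loop terminates).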
+ for _, value := range values {
+ values = append(values, value)
+ }
+
+ quotedTable := scope.Quote(handler.Table(db))
+ sql := fmt.Sprintf(
+ "INSERT INTO %v (%v) SELECT %v %v WHERE NOT EXISTS (SELECT * FROM %v WHERE %v)",
+ quotedTable,
+ strings.Join(assignColumns, ","),
+ strings.Join(binVars, ","),
+ scope.Dialect().SelectFromDummyTable(),
+ quotedTable,
+ strings.Join(conditions, " AND "),
+ )
+
+ return db.Exec(sql, values...).Error
+}
+
+// Delete delete relationship in join table for sources
+func (s JoinTableHandler) Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error {
+ var (
+ scope = db.NewScope(nil)
+ conditions []string
+ values []interface{}
+ conditionMap = map[string]interface{}{}
+ )
+
+ s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Source, s.Destination}, sources...)
+
+ for key, value := range conditionMap {
+ conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key)))
+ values = append(values, value)
+ }
+
+ return db.Table(handler.Table(db)).Where(strings.Join(conditions, " AND "), values...).Delete("").Error
+}
+
+// JoinWith query with `Join` conditions
+func (s JoinTableHandler) JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB {
+ var (
+ scope = db.NewScope(source)
+ tableName = handler.Table(db)
+ quotedTableName = scope.Quote(tableName)
+ joinConditions []string
+ values []interface{}
+ )
+
+ if s.Source.ModelType == scope.GetModelStruct().ModelType {
+ destinationTableName := db.NewScope(reflect.New(s.Destination.ModelType).Interface()).QuotedTableName()
+ for _, foreignKey := range s.Destination.ForeignKeys {
+ joinConditions = append(joinConditions, fmt.Sprintf("%v.%v = %v.%v", quotedTableName, scope.Quote(foreignKey.DBName), destinationTableName, scope.Quote(foreignKey.AssociationDBName)))
+ }
+
+ var foreignDBNames []string
+ var foreignFieldNames []string
+
+ for _, foreignKey := range s.Source.ForeignKeys {
+ foreignDBNames = append(foreignDBNames, foreignKey.DBName)
+ if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok {
+ foreignFieldNames = append(foreignFieldNames, field.Name)
+ }
+ }
+
+ foreignFieldValues := scope.getColumnAsArray(foreignFieldNames, scope.Value)
+
+ var condString string
+ if len(foreignFieldValues) > 0 {
+ var quotedForeignDBNames []string
+ for _, dbName := range foreignDBNames {
+ quotedForeignDBNames = append(quotedForeignDBNames, tableName+"."+dbName)
+ }
+
+ condString = fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, quotedForeignDBNames), toQueryMarks(foreignFieldValues))
+
+ keys := scope.getColumnAsArray(foreignFieldNames, scope.Value)
+ values = append(values, toQueryValues(keys))
+ } else {
+ condString = fmt.Sprintf("1 <> 1")
+ }
+
+ return db.Joins(fmt.Sprintf("INNER JOIN %v ON %v", quotedTableName, strings.Join(joinConditions, " AND "))).
+ Where(condString, toQueryValues(foreignFieldValues)...)
+ }
+
+ db.Error = errors.New("wrong source type for join table handler")
+ return db
+}
diff --git a/vendor/github.com/jinzhu/gorm/logger.go b/vendor/github.com/jinzhu/gorm/logger.go
new file mode 100644
index 0000000..88e167d
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/logger.go
@@ -0,0 +1,141 @@
+package gorm
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "time"
+ "unicode"
+)
+
+var (
+ defaultLogger = Logger{log.New(os.Stdout, "\r\n", 0)}
+ sqlRegexp = regexp.MustCompile(`\?`)
+ numericPlaceHolderRegexp = regexp.MustCompile(`\$\d+`)
+)
+
+func isPrintable(s string) bool {
+ for _, r := range s {
+ if !unicode.IsPrint(r) {
+ return false
+ }
+ }
+ return true
+}
+
+var LogFormatter = func(values ...interface{}) (messages []interface{}) {
+ if len(values) > 1 {
+ var (
+ sql string
+ formattedValues []string
+ level = values[0]
+ currentTime = "\n\033[33m[" + NowFunc().Format("2006-01-02 15:04:05") + "]\033[0m"
+ source = fmt.Sprintf("\033[35m(%v)\033[0m", values[1])
+ )
+
+ messages = []interface{}{source, currentTime}
+
+ if len(values) == 2 {
+ //remove the line break
+ currentTime = currentTime[1:]
+ //remove the brackets
+ source = fmt.Sprintf("\033[35m%v\033[0m", values[1])
+
+ messages = []interface{}{currentTime, source}
+ }
+
+ if level == "sql" {
+ // duration
+ messages = append(messages, fmt.Sprintf(" \033[36;1m[%.2fms]\033[0m ", float64(values[2].(time.Duration).Nanoseconds()/1e4)/100.0))
+ // sql
+
+ for _, value := range values[4].([]interface{}) {
+ indirectValue := reflect.Indirect(reflect.ValueOf(value))
+ if indirectValue.IsValid() {
+ value = indirectValue.Interface()
+ if t, ok := value.(time.Time); ok {
+ if t.IsZero() {
+ formattedValues = append(formattedValues, fmt.Sprintf("'%v'", "0000-00-00 00:00:00"))
+ } else {
+ formattedValues = append(formattedValues, fmt.Sprintf("'%v'", t.Format("2006-01-02 15:04:05")))
+ }
+ } else if b, ok := value.([]byte); ok {
+ if str := string(b); isPrintable(str) {
+ formattedValues = append(formattedValues, fmt.Sprintf("'%v'", str))
+ } else {
+ formattedValues = append(formattedValues, "''")
+ }
+ } else if r, ok := value.(driver.Valuer); ok {
+ if value, err := r.Value(); err == nil && value != nil {
+ formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value))
+ } else {
+ formattedValues = append(formattedValues, "NULL")
+ }
+ } else {
+ switch value.(type) {
+ case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool:
+ formattedValues = append(formattedValues, fmt.Sprintf("%v", value))
+ default:
+ formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value))
+ }
+ }
+ } else {
+ formattedValues = append(formattedValues, "NULL")
+ }
+ }
+
+ // differentiate between $n placeholders or else treat like ?
+ if numericPlaceHolderRegexp.MatchString(values[3].(string)) {
+ sql = values[3].(string)
+ for index, value := range formattedValues {
+ placeholder := fmt.Sprintf(`\$%d([^\d]|$)`, index+1)
+ sql = regexp.MustCompile(placeholder).ReplaceAllString(sql, value+"$1")
+ }
+ } else {
+ formattedValuesLength := len(formattedValues)
+ for index, value := range sqlRegexp.Split(values[3].(string), -1) {
+ sql += value
+ if index < formattedValuesLength {
+ sql += formattedValues[index]
+ }
+ }
+ }
+
+ messages = append(messages, sql)
+ messages = append(messages, fmt.Sprintf(" \n\033[36;31m[%v]\033[0m ", strconv.FormatInt(values[5].(int64), 10)+" rows affected or returned "))
+ } else {
+ messages = append(messages, "\033[31;1m")
+ messages = append(messages, values[2:]...)
+ messages = append(messages, "\033[0m")
+ }
+ }
+
+ return
+}
+
+type logger interface {
+ Print(v ...interface{})
+}
+
+// LogWriter log writer interface
+type LogWriter interface {
+ Println(v ...interface{})
+}
+
+// Logger default logger
+type Logger struct {
+ LogWriter
+}
+
+// Print format & print log
+func (logger Logger) Print(values ...interface{}) {
+ logger.Println(LogFormatter(values...)...)
+}
+
+type nopLogger struct{}
+
+func (nopLogger) Print(values ...interface{}) {}
diff --git a/vendor/github.com/jinzhu/gorm/main.go b/vendor/github.com/jinzhu/gorm/main.go
new file mode 100644
index 0000000..466e80c
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/main.go
@@ -0,0 +1,886 @@
+package gorm
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+)
+
+// DB contains information for current db connection
+type DB struct {
+ sync.RWMutex
+ Value interface{}
+ Error error
+ RowsAffected int64
+
+ // single db
+ db SQLCommon
+ blockGlobalUpdate bool
+ logMode logModeValue
+ logger logger
+ search *search
+ values sync.Map
+
+ // global db
+ parent *DB
+ callbacks *Callback
+ dialect Dialect
+ singularTable bool
+
+ // function to be used to override the creating of a new timestamp
+ nowFuncOverride func() time.Time
+}
+
+type logModeValue int
+
+const (
+ defaultLogMode logModeValue = iota
+ noLogMode
+ detailedLogMode
+)
+
+// Open initializes a new db connection; the driver needs to be imported first, e.g:
+//
+// import _ "github.com/go-sql-driver/mysql"
+// func main() {
+// db, err := gorm.Open("mysql", "user:password@/dbname?charset=utf8&parseTime=True&loc=Local")
+// }
+// GORM wraps some drivers to make their import paths easier to remember, so you could import the mysql driver with
+// import _ "github.com/jinzhu/gorm/dialects/mysql"
+// // import _ "github.com/jinzhu/gorm/dialects/postgres"
+// // import _ "github.com/jinzhu/gorm/dialects/sqlite"
+// // import _ "github.com/jinzhu/gorm/dialects/mssql"
+func Open(dialect string, args ...interface{}) (db *DB, err error) {
+ if len(args) == 0 {
+ err = errors.New("invalid database source")
+ return nil, err
+ }
+ var source string
+ var dbSQL SQLCommon
+ var ownDbSQL bool
+
+ switch value := args[0].(type) {
+ case string:
+ var driver = dialect
+ if len(args) == 1 {
+ source = value
+ } else if len(args) >= 2 {
+ driver = value
+ source = args[1].(string)
+ }
+ dbSQL, err = sql.Open(driver, source)
+ ownDbSQL = true
+ case SQLCommon:
+ dbSQL = value
+ ownDbSQL = false
+ default:
+ return nil, fmt.Errorf("invalid database source: %v is not a valid type", value)
+ }
+
+ db = &DB{
+ db: dbSQL,
+ logger: defaultLogger,
+ callbacks: DefaultCallback,
+ dialect: newDialect(dialect, dbSQL),
+ }
+ db.parent = db
+ if err != nil {
+ return
+ }
+ // Send a ping to make sure the database connection is alive.
+ if d, ok := dbSQL.(*sql.DB); ok {
+ if err = d.Ping(); err != nil && ownDbSQL {
+ d.Close()
+ }
+ }
+ return
+}
+
+// New clone a new db connection without search conditions
+func (s *DB) New() *DB {
+ clone := s.clone()
+ clone.search = nil
+ clone.Value = nil
+ return clone
+}
+
+type closer interface {
+ Close() error
+}
+
+// Close closes the current db connection. If the database connection is not an io.Closer, it returns an error.
+func (s *DB) Close() error {
+ if db, ok := s.parent.db.(closer); ok {
+ return db.Close()
+ }
+ return errors.New("can't close current db")
+}
+
+// DB get `*sql.DB` from current connection
+// If the underlying database connection is not a *sql.DB (e.g. a transaction), it panics
+func (s *DB) DB() *sql.DB {
+ db, ok := s.db.(*sql.DB)
+ if !ok {
+ panic("can't support full GORM on currently status, maybe this is a TX instance.")
+ }
+ return db
+}
+
+// CommonDB return the underlying `*sql.DB` or `*sql.Tx` instance, mainly intended to allow coexistence with legacy non-GORM code.
+func (s *DB) CommonDB() SQLCommon {
+ return s.db
+}
+
+// Dialect get dialect
+func (s *DB) Dialect() Dialect {
+ return s.dialect
+}
+
+// Callback return `Callbacks` container, you could add/change/delete callbacks with it
+// db.Callback().Create().Register("update_created_at", updateCreated)
+// Refer https://jinzhu.github.io/gorm/development.html#callbacks
+func (s *DB) Callback() *Callback {
+ s.parent.callbacks = s.parent.callbacks.clone(s.logger)
+ return s.parent.callbacks
+}
+
+// SetLogger replace default logger
+func (s *DB) SetLogger(log logger) {
+ s.logger = log
+}
+
+// LogMode set log mode, `true` for detailed logs, `false` for no log; by default, only error logs are printed
+func (s *DB) LogMode(enable bool) *DB {
+ if enable {
+ s.logMode = detailedLogMode
+ } else {
+ s.logMode = noLogMode
+ }
+ return s
+}
+
+// SetNowFuncOverride sets the function to be used when creating a new timestamp
+func (s *DB) SetNowFuncOverride(nowFuncOverride func() time.Time) *DB {
+ s.nowFuncOverride = nowFuncOverride
+ return s
+}
+
+// nowFunc returns a new timestamp, using the nowFuncOverride set on the DB instance if provided,
+// otherwise falling back to the global NowFunc()
+func (s *DB) nowFunc() time.Time {
+ if s.nowFuncOverride != nil {
+ return s.nowFuncOverride()
+ }
+
+ return NowFunc()
+}
+
+// BlockGlobalUpdate, if true, generates an error on update/delete calls without a where clause.
+// This prevents accidental global updates/deletions caused by empty objects.
+func (s *DB) BlockGlobalUpdate(enable bool) *DB {
+ s.blockGlobalUpdate = enable
+ return s
+}
+
+// HasBlockGlobalUpdate returns the state of the block global update flag
+func (s *DB) HasBlockGlobalUpdate() bool {
+ return s.blockGlobalUpdate
+}
+
+// SingularTable uses singular table names by default
+func (s *DB) SingularTable(enable bool) {
+ s.parent.Lock()
+ defer s.parent.Unlock()
+ s.parent.singularTable = enable
+}
+
+// NewScope creates a scope for the current operation
+func (s *DB) NewScope(value interface{}) *Scope {
+ dbClone := s.clone()
+ dbClone.Value = value
+ scope := &Scope{db: dbClone, Value: value}
+ if s.search != nil {
+ scope.Search = s.search.clone()
+ } else {
+ scope.Search = &search{}
+ }
+ return scope
+}
+
+// QueryExpr returns the query as a SqlExpr object
+func (s *DB) QueryExpr() *SqlExpr {
+ scope := s.NewScope(s.Value)
+ scope.InstanceSet("skip_bindvar", true)
+ scope.prepareQuerySQL()
+
+ return Expr(scope.SQL, scope.SQLVars...)
+}
+
+// SubQuery returns the query as a sub query
+func (s *DB) SubQuery() *SqlExpr {
+ scope := s.NewScope(s.Value)
+ scope.InstanceSet("skip_bindvar", true)
+ scope.prepareQuerySQL()
+
+ return Expr(fmt.Sprintf("(%v)", scope.SQL), scope.SQLVars...)
+}
+
+// Where returns a new relation that filters records with the given conditions; accepts `map`, `struct` or `string` as conditions, refer http://jinzhu.github.io/gorm/crud.html#query
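+// Illustrative usage (assumes a User model defined elsewhere):
+//   db.Where("name = ?", "jinzhu").First(&user)
+//   db.Where(&User{Name: "jinzhu"}).First(&user)
+//   db.Where(map[string]interface{}{"name": "jinzhu"}).Find(&users)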
+func (s *DB) Where(query interface{}, args ...interface{}) *DB {
+ return s.clone().search.Where(query, args...).db
+}
+
+// Or filters records that match the previous conditions or this one, similar to `Where`
+func (s *DB) Or(query interface{}, args ...interface{}) *DB {
+ return s.clone().search.Or(query, args...).db
+}
+
+// Not filters records that don't match the given conditions, similar to `Where`
+func (s *DB) Not(query interface{}, args ...interface{}) *DB {
+ return s.clone().search.Not(query, args...).db
+}
+
+// Limit specifies the number of records to be retrieved
+func (s *DB) Limit(limit interface{}) *DB {
+ return s.clone().search.Limit(limit).db
+}
+
+// Offset specifies the number of records to skip before starting to return records
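+// Illustrative pagination sketch (assumes a User model defined elsewhere):
+//   db.Limit(10).Offset(20).Find(&users) // returns records 21-30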
+func (s *DB) Offset(offset interface{}) *DB {
+ return s.clone().search.Offset(offset).db
+}
+
+// Order specifies the order when retrieving records from the database; set reorder to `true` to overwrite previously defined order conditions
+// db.Order("name DESC")
+// db.Order("name DESC", true) // reorder
+// db.Order(gorm.Expr("name = ? DESC", "first")) // sql expression
+func (s *DB) Order(value interface{}, reorder ...bool) *DB {
+ return s.clone().search.Order(value, reorder...).db
+}
+
+// Select specifies the fields to retrieve from the database when querying; by default, all fields are selected.
+// When creating/updating, it specifies the fields to save to the database
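+// Illustrative usage (assumes a users table with name and age columns):
+//   db.Select("name, age").Find(&users)
+//   db.Select([]string{"name", "age"}).Find(&users)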
+func (s *DB) Select(query interface{}, args ...interface{}) *DB {
+ return s.clone().search.Select(query, args...).db
+}
+
+// Omit specifies fields to ignore when saving to the database for creating and updating
+func (s *DB) Omit(columns ...string) *DB {
+ return s.clone().search.Omit(columns...).db
+}
+
+// Group specifies the GROUP BY clause for the query
+func (s *DB) Group(query string) *DB {
+ return s.clone().search.Group(query).db
+}
+
+// Having specifies HAVING conditions for GROUP BY
+func (s *DB) Having(query interface{}, values ...interface{}) *DB {
+ return s.clone().search.Having(query, values...).db
+}
+
+// Joins specifies join conditions, e.g.
+// db.Joins("JOIN emails ON emails.user_id = users.id AND emails.email = ?", "jinzhu@example.org").Find(&user)
+func (s *DB) Joins(query string, args ...interface{}) *DB {
+ return s.clone().search.Joins(query, args...).db
+}
+
+// Scopes passes the current database connection to the given `func(*DB) *DB` functions, which can be used to add conditions dynamically, e.g.
+// func AmountGreaterThan1000(db *gorm.DB) *gorm.DB {
+// return db.Where("amount > ?", 1000)
+// }
+//
+// func OrderStatus(status []string) func (db *gorm.DB) *gorm.DB {
+// return func (db *gorm.DB) *gorm.DB {
+// return db.Scopes(AmountGreaterThan1000).Where("status in (?)", status)
+// }
+// }
+//
+// db.Scopes(AmountGreaterThan1000, OrderStatus([]string{"paid", "shipped"})).Find(&orders)
+// Refer https://jinzhu.github.io/gorm/crud.html#scopes
+func (s *DB) Scopes(funcs ...func(*DB) *DB) *DB {
+ for _, f := range funcs {
+ s = f(s)
+ }
+ return s
+}
+
+// Unscoped returns all records, including soft-deleted ones, refer Soft Delete https://jinzhu.github.io/gorm/crud.html#soft-delete
+func (s *DB) Unscoped() *DB {
+ return s.clone().search.unscoped().db
+}
+
+// Attrs initializes the struct with the given arguments if no record is found with `FirstOrInit` https://jinzhu.github.io/gorm/crud.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/crud.html#firstorcreate
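+// Illustrative usage (assumes a User model defined elsewhere):
+//   db.Where(User{Name: "non_existing"}).Attrs(User{Age: 20}).FirstOrInit(&user)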
+func (s *DB) Attrs(attrs ...interface{}) *DB {
+ return s.clone().search.Attrs(attrs...).db
+}
+
+// Assign assigns the given arguments to the result regardless of whether it is found, with `FirstOrInit` https://jinzhu.github.io/gorm/crud.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/crud.html#firstorcreate
+func (s *DB) Assign(attrs ...interface{}) *DB {
+ return s.clone().search.Assign(attrs...).db
+}
+
+// First finds the first record matching the given conditions, ordered by primary key
+func (s *DB) First(out interface{}, where ...interface{}) *DB {
+ newScope := s.NewScope(out)
+ newScope.Search.Limit(1)
+
+ return newScope.Set("gorm:order_by_primary_key", "ASC").
+ inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
+}
+
+// Take returns one record matching the given conditions; the order depends on the database implementation
+func (s *DB) Take(out interface{}, where ...interface{}) *DB {
+ newScope := s.NewScope(out)
+ newScope.Search.Limit(1)
+ return newScope.inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
+}
+
+// Last finds the last record matching the given conditions, ordered by primary key
+func (s *DB) Last(out interface{}, where ...interface{}) *DB {
+ newScope := s.NewScope(out)
+ newScope.Search.Limit(1)
+ return newScope.Set("gorm:order_by_primary_key", "DESC").
+ inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
+}
+
+// Find finds records matching the given conditions
+func (s *DB) Find(out interface{}, where ...interface{}) *DB {
+ return s.NewScope(out).inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
+}
+
+// Preloads preloads relations without touching out
+func (s *DB) Preloads(out interface{}) *DB {
+ return s.NewScope(out).InstanceSet("gorm:only_preload", 1).callCallbacks(s.parent.callbacks.queries).db
+}
+
+// Scan scans the value into a struct
+func (s *DB) Scan(dest interface{}) *DB {
+ return s.NewScope(s.Value).Set("gorm:query_destination", dest).callCallbacks(s.parent.callbacks.queries).db
+}
+
+// Row returns a `*sql.Row` with the given conditions
+func (s *DB) Row() *sql.Row {
+ return s.NewScope(s.Value).row()
+}
+
+// Rows returns `*sql.Rows` with the given conditions
+func (s *DB) Rows() (*sql.Rows, error) {
+ return s.NewScope(s.Value).rows()
+}
+
+// ScanRows scans `*sql.Rows` into the given struct
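+// Illustrative usage sketch (assumes a User model defined elsewhere):
+//   rows, err := db.Model(&User{}).Where("age > ?", 18).Rows()
+//   if err == nil {
+//       defer rows.Close()
+//       for rows.Next() {
+//           var user User
+//           db.ScanRows(rows, &user)
+//       }
+//   }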
+func (s *DB) ScanRows(rows *sql.Rows, result interface{}) error {
+ var (
+ scope = s.NewScope(result)
+ clone = scope.db
+ columns, err = rows.Columns()
+ )
+
+ if clone.AddError(err) == nil {
+ scope.scan(rows, columns, scope.Fields())
+ }
+
+ return clone.Error
+}
+
+// Pluck queries a single column from a model into a slice, e.g.
+// var ages []int64
+// db.Find(&users).Pluck("age", &ages)
+func (s *DB) Pluck(column string, value interface{}) *DB {
+ return s.NewScope(s.Value).pluck(column, value).db
+}
+
+// Count gets how many records exist for a model
+func (s *DB) Count(value interface{}) *DB {
+ return s.NewScope(s.Value).count(value).db
+}
+
+// Related gets related associations
+func (s *DB) Related(value interface{}, foreignKeys ...string) *DB {
+ return s.NewScope(s.Value).related(value, foreignKeys...).db
+}
+
+// FirstOrInit finds the first matched record or initializes a new one with the given conditions (only works with struct and map conditions)
+// https://jinzhu.github.io/gorm/crud.html#firstorinit
+func (s *DB) FirstOrInit(out interface{}, where ...interface{}) *DB {
+ c := s.clone()
+ if result := c.First(out, where...); result.Error != nil {
+ if !result.RecordNotFound() {
+ return result
+ }
+ c.NewScope(out).inlineCondition(where...).initialize()
+ } else {
+ c.NewScope(out).updatedAttrsWithValues(c.search.assignAttrs)
+ }
+ return c
+}
+
+// FirstOrCreate finds the first matched record or creates a new one with the given conditions (only works with struct and map conditions)
+// https://jinzhu.github.io/gorm/crud.html#firstorcreate
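+// Illustrative usage (assumes a User model defined elsewhere):
+//   db.FirstOrCreate(&user, User{Name: "jinzhu"})
+//   db.Where(User{Name: "jinzhu"}).Assign(User{Age: 30}).FirstOrCreate(&user)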
+func (s *DB) FirstOrCreate(out interface{}, where ...interface{}) *DB {
+ c := s.clone()
+ if result := s.First(out, where...); result.Error != nil {
+ if !result.RecordNotFound() {
+ return result
+ }
+ return c.NewScope(out).inlineCondition(where...).initialize().callCallbacks(c.parent.callbacks.creates).db
+ } else if len(c.search.assignAttrs) > 0 {
+ return c.NewScope(out).InstanceSet("gorm:update_interface", c.search.assignAttrs).callCallbacks(c.parent.callbacks.updates).db
+ }
+ return c
+}
+
+// Update updates attributes with callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
+// WARNING: when updating with a struct, GORM will not update fields that have a zero value
+func (s *DB) Update(attrs ...interface{}) *DB {
+ return s.Updates(toSearchableMap(attrs...), true)
+}
+
+// Updates updates attributes with callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
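+// Illustrative usage (assumes a User model defined elsewhere; struct updates skip zero-value fields, map updates do not):
+//   db.Model(&user).Updates(User{Name: "hello", Age: 18})
+//   db.Model(&user).Updates(map[string]interface{}{"name": "hello", "age": 18})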
+func (s *DB) Updates(values interface{}, ignoreProtectedAttrs ...bool) *DB {
+ return s.NewScope(s.Value).
+ Set("gorm:ignore_protected_attrs", len(ignoreProtectedAttrs) > 0).
+ InstanceSet("gorm:update_interface", values).
+ callCallbacks(s.parent.callbacks.updates).db
+}
+
+// UpdateColumn updates attributes without callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
+func (s *DB) UpdateColumn(attrs ...interface{}) *DB {
+ return s.UpdateColumns(toSearchableMap(attrs...))
+}
+
+// UpdateColumns updates attributes without callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
+func (s *DB) UpdateColumns(values interface{}) *DB {
+ return s.NewScope(s.Value).
+ Set("gorm:update_column", true).
+ Set("gorm:save_associations", false).
+ InstanceSet("gorm:update_interface", values).
+ callCallbacks(s.parent.callbacks.updates).db
+}
+
+// Save updates the value in the database; if the value has no primary key set, it will be inserted
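+// Illustrative usage (assumes a User model defined elsewhere):
+//   db.First(&user, 1)
+//   user.Name = "new name"
+//   db.Save(&user) // runs an UPDATE because the primary key is set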
+func (s *DB) Save(value interface{}) *DB {
+ scope := s.NewScope(value)
+ if !scope.PrimaryKeyZero() {
+ newDB := scope.callCallbacks(s.parent.callbacks.updates).db
+ if newDB.Error == nil && newDB.RowsAffected == 0 {
+ return s.New().Table(scope.TableName()).FirstOrCreate(value)
+ }
+ return newDB
+ }
+ return scope.callCallbacks(s.parent.callbacks.creates).db
+}
+
+// Create inserts the value into the database
+func (s *DB) Create(value interface{}) *DB {
+ scope := s.NewScope(value)
+ return scope.callCallbacks(s.parent.callbacks.creates).db
+}
+
+// Delete deletes values matching the given conditions; if the value has a primary key, it will be included as a condition
+// WARNING: If the model has a DeletedAt field, GORM will only set DeletedAt to the current time (soft delete)
+func (s *DB) Delete(value interface{}, where ...interface{}) *DB {
+ return s.NewScope(value).inlineCondition(where...).callCallbacks(s.parent.callbacks.deletes).db
+}
+
+// Raw uses raw SQL as conditions; it won't be run unless invoked by other methods, e.g.
+// db.Raw("SELECT name, age FROM users WHERE name = ?", 3).Scan(&result)
+func (s *DB) Raw(sql string, values ...interface{}) *DB {
+ return s.clone().search.Raw(true).Where(sql, values...).db
+}
+
+// Exec executes raw SQL
+func (s *DB) Exec(sql string, values ...interface{}) *DB {
+ scope := s.NewScope(nil)
+ generatedSQL := scope.buildCondition(map[string]interface{}{"query": sql, "args": values}, true)
+ generatedSQL = strings.TrimSuffix(strings.TrimPrefix(generatedSQL, "("), ")")
+ scope.Raw(generatedSQL)
+ return scope.Exec().db
+}
+
+// Model specifies the model you would like to run db operations on, e.g.
+// // update all users's name to `hello`
+// db.Model(&User{}).Update("name", "hello")
+// // if user's primary key is non-blank, will use it as condition, then will only update the user's name to `hello`
+// db.Model(&user).Update("name", "hello")
+func (s *DB) Model(value interface{}) *DB {
+ c := s.clone()
+ c.Value = value
+ return c
+}
+
+// Table specifies the table you would like to run db operations on
+func (s *DB) Table(name string) *DB {
+ clone := s.clone()
+ clone.search.Table(name)
+ clone.Value = nil
+ return clone
+}
+
+// Debug starts debug mode
+func (s *DB) Debug() *DB {
+ return s.clone().LogMode(true)
+}
+
+// Transaction runs the given function within a transaction;
+// if the function returns an error (or panics), the transaction is rolled back, otherwise it is committed.
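+// Illustrative usage sketch (assumes a User model defined elsewhere):
+//   err := db.Transaction(func(tx *gorm.DB) error {
+//       if err := tx.Create(&user).Error; err != nil {
+//           return err // returning an error rolls the transaction back
+//       }
+//       return nil // returning nil commits
+//   })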
+func (s *DB) Transaction(fc func(tx *DB) error) (err error) {
+
+ if _, ok := s.db.(*sql.Tx); ok {
+ return fc(s)
+ }
+
+ panicked := true
+ tx := s.Begin()
+ defer func() {
+ // Make sure to rollback when panic, Block error or Commit error
+ if panicked || err != nil {
+ tx.Rollback()
+ }
+ }()
+
+ err = fc(tx)
+
+ if err == nil {
+ err = tx.Commit().Error
+ }
+
+ panicked = false
+ return
+}
+
+// Begin begins a transaction
+func (s *DB) Begin() *DB {
+ return s.BeginTx(context.Background(), &sql.TxOptions{})
+}
+
+// BeginTx begins a transaction with options
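+// Illustrative usage sketch (ctx and the User model are assumed to be defined elsewhere):
+//   tx := db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable})
+//   if err := tx.Create(&user).Error; err != nil {
+//       tx.Rollback()
+//   } else {
+//       tx.Commit()
+//   }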
+func (s *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) *DB {
+ c := s.clone()
+ if db, ok := c.db.(sqlDb); ok && db != nil {
+ tx, err := db.BeginTx(ctx, opts)
+ c.db = interface{}(tx).(SQLCommon)
+
+ c.dialect.SetDB(c.db)
+ c.AddError(err)
+ } else {
+ c.AddError(ErrCantStartTransaction)
+ }
+ return c
+}
+
+// Commit commits a transaction
+func (s *DB) Commit() *DB {
+ var emptySQLTx *sql.Tx
+ if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx {
+ s.AddError(db.Commit())
+ } else {
+ s.AddError(ErrInvalidTransaction)
+ }
+ return s
+}
+
+// Rollback rolls back a transaction
+func (s *DB) Rollback() *DB {
+ var emptySQLTx *sql.Tx
+ if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx {
+ if err := db.Rollback(); err != nil && err != sql.ErrTxDone {
+ s.AddError(err)
+ }
+ } else {
+ s.AddError(ErrInvalidTransaction)
+ }
+ return s
+}
+
+// RollbackUnlessCommitted rolls back a transaction if it has not yet been
+// committed.
+func (s *DB) RollbackUnlessCommitted() *DB {
+ var emptySQLTx *sql.Tx
+ if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx {
+ err := db.Rollback()
+ // Ignore the error indicating that the transaction has already
+ // been committed.
+ if err != sql.ErrTxDone {
+ s.AddError(err)
+ }
+ } else {
+ s.AddError(ErrInvalidTransaction)
+ }
+ return s
+}
+
+// NewRecord checks whether the value's primary key is blank
+func (s *DB) NewRecord(value interface{}) bool {
+ return s.NewScope(value).PrimaryKeyZero()
+}
+
+// RecordNotFound checks whether an ErrRecordNotFound error was returned
+func (s *DB) RecordNotFound() bool {
+ for _, err := range s.GetErrors() {
+ if err == ErrRecordNotFound {
+ return true
+ }
+ }
+ return false
+}
+
+// CreateTable creates tables for the given models
+func (s *DB) CreateTable(models ...interface{}) *DB {
+ db := s.Unscoped()
+ for _, model := range models {
+ db = db.NewScope(model).createTable().db
+ }
+ return db
+}
+
+// DropTable drops tables for the given values (models or table names)
+func (s *DB) DropTable(values ...interface{}) *DB {
+ db := s.clone()
+ for _, value := range values {
+ if tableName, ok := value.(string); ok {
+ db = db.Table(tableName)
+ }
+
+ db = db.NewScope(value).dropTable().db
+ }
+ return db
+}
+
+// DropTableIfExists drops the table if it exists
+func (s *DB) DropTableIfExists(values ...interface{}) *DB {
+ db := s.clone()
+ for _, value := range values {
+ if s.HasTable(value) {
+ db.AddError(s.DropTable(value).Error)
+ }
+ }
+ return db
+}
+
+// HasTable checks whether a table exists for the given value
+func (s *DB) HasTable(value interface{}) bool {
+ var (
+ scope = s.NewScope(value)
+ tableName string
+ )
+
+ if name, ok := value.(string); ok {
+ tableName = name
+ } else {
+ tableName = scope.TableName()
+ }
+
+ has := scope.Dialect().HasTable(tableName)
+ s.AddError(scope.db.Error)
+ return has
+}
+
+// AutoMigrate runs auto migration for the given models; it will only add missing fields and won't delete/change existing data
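+// Illustrative usage (User and Product are assumed models; the table options shown apply to MySQL):
+//   db.AutoMigrate(&User{}, &Product{})
+//   db.Set("gorm:table_options", "ENGINE=InnoDB").AutoMigrate(&User{})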
+func (s *DB) AutoMigrate(values ...interface{}) *DB {
+ db := s.Unscoped()
+ for _, value := range values {
+ db = db.NewScope(value).autoMigrate().db
+ }
+ return db
+}
+
+// ModifyColumn modifies a column to the given type
+func (s *DB) ModifyColumn(column string, typ string) *DB {
+ scope := s.NewScope(s.Value)
+ scope.modifyColumn(column, typ)
+ return scope.db
+}
+
+// DropColumn drops a column
+func (s *DB) DropColumn(column string) *DB {
+ scope := s.NewScope(s.Value)
+ scope.dropColumn(column)
+ return scope.db
+}
+
+// AddIndex adds an index on the given columns with the given name
+func (s *DB) AddIndex(indexName string, columns ...string) *DB {
+ scope := s.Unscoped().NewScope(s.Value)
+ scope.addIndex(false, indexName, columns...)
+ return scope.db
+}
+
+// AddUniqueIndex adds a unique index on the given columns with the given name
+func (s *DB) AddUniqueIndex(indexName string, columns ...string) *DB {
+ scope := s.Unscoped().NewScope(s.Value)
+ scope.addIndex(true, indexName, columns...)
+ return scope.db
+}
+
+// RemoveIndex removes the index with the given name
+func (s *DB) RemoveIndex(indexName string) *DB {
+ scope := s.NewScope(s.Value)
+ scope.removeIndex(indexName)
+ return scope.db
+}
+
+// AddForeignKey adds a foreign key to the given scope, e.g.:
+// db.Model(&User{}).AddForeignKey("city_id", "cities(id)", "RESTRICT", "RESTRICT")
+func (s *DB) AddForeignKey(field string, dest string, onDelete string, onUpdate string) *DB {
+ scope := s.NewScope(s.Value)
+ scope.addForeignKey(field, dest, onDelete, onUpdate)
+ return scope.db
+}
+
+// RemoveForeignKey removes a foreign key from the given scope, e.g.:
+// db.Model(&User{}).RemoveForeignKey("city_id", "cities(id)")
+func (s *DB) RemoveForeignKey(field string, dest string) *DB {
+ scope := s.clone().NewScope(s.Value)
+ scope.removeForeignKey(field, dest)
+ return scope.db
+}
+
+// Association starts `Association Mode`, which makes it easier to handle relationships, refer: https://jinzhu.github.io/gorm/associations.html#association-mode
+func (s *DB) Association(column string) *Association {
+ var err error
+ var scope = s.Set("gorm:association:source", s.Value).NewScope(s.Value)
+
+ if primaryField := scope.PrimaryField(); primaryField.IsBlank {
+ err = errors.New("primary key can't be nil")
+ } else {
+ if field, ok := scope.FieldByName(column); ok {
+ if field.Relationship == nil || len(field.Relationship.ForeignFieldNames) == 0 {
+ err = fmt.Errorf("invalid association %v for %v", column, scope.IndirectValue().Type())
+ } else {
+ return &Association{scope: scope, column: column, field: field}
+ }
+ } else {
+ err = fmt.Errorf("%v doesn't have column %v", scope.IndirectValue().Type(), column)
+ }
+ }
+
+ return &Association{Error: err}
+}
+
+// Preload preloads associations with the given conditions, e.g.
+// db.Preload("Orders", "state NOT IN (?)", "cancelled").Find(&users)
+func (s *DB) Preload(column string, conditions ...interface{}) *DB {
+ return s.clone().search.Preload(column, conditions...).db
+}
+
+// Set sets a setting by name, which can be used in callbacks; it clones a new db and updates the clone's setting
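+// Illustrative usage (assumes a User model; "gorm:table_options" is a setting read by the create-table callbacks):
+//   db.Set("gorm:table_options", "ENGINE=InnoDB").CreateTable(&User{})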
+func (s *DB) Set(name string, value interface{}) *DB {
+ return s.clone().InstantSet(name, value)
+}
+
+// InstantSet sets a setting immediately, affecting the current db
+func (s *DB) InstantSet(name string, value interface{}) *DB {
+ s.values.Store(name, value)
+ return s
+}
+
+// Get gets a setting by name
+func (s *DB) Get(name string) (value interface{}, ok bool) {
+ value, ok = s.values.Load(name)
+ return
+}
+
+// SetJoinTableHandler sets a model's join table handler for a relation
+func (s *DB) SetJoinTableHandler(source interface{}, column string, handler JoinTableHandlerInterface) {
+ scope := s.NewScope(source)
+ for _, field := range scope.GetModelStruct().StructFields {
+ if field.Name == column || field.DBName == column {
+ if many2many, _ := field.TagSettingsGet("MANY2MANY"); many2many != "" {
+ source := (&Scope{Value: source}).GetModelStruct().ModelType
+ destination := (&Scope{Value: reflect.New(field.Struct.Type).Interface()}).GetModelStruct().ModelType
+ handler.Setup(field.Relationship, many2many, source, destination)
+ field.Relationship.JoinTableHandler = handler
+ if table := handler.Table(s); scope.Dialect().HasTable(table) {
+ s.Table(table).AutoMigrate(handler)
+ }
+ }
+ }
+ }
+}
+
+// AddError adds an error to the db
+func (s *DB) AddError(err error) error {
+ if err != nil {
+ if err != ErrRecordNotFound {
+ if s.logMode == defaultLogMode {
+ go s.print("error", fileWithLineNum(), err)
+ } else {
+ s.log(err)
+ }
+
+ errors := Errors(s.GetErrors())
+ errors = errors.Add(err)
+ if len(errors) > 1 {
+ err = errors
+ }
+ }
+
+ s.Error = err
+ }
+ return err
+}
+
+// GetErrors returns the errors that have occurred on the db
+func (s *DB) GetErrors() []error {
+ if errs, ok := s.Error.(Errors); ok {
+ return errs
+ } else if s.Error != nil {
+ return []error{s.Error}
+ }
+ return []error{}
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Private Methods For DB
+////////////////////////////////////////////////////////////////////////////////
+
+func (s *DB) clone() *DB {
+ db := &DB{
+ db: s.db,
+ parent: s.parent,
+ logger: s.logger,
+ logMode: s.logMode,
+ Value: s.Value,
+ Error: s.Error,
+ blockGlobalUpdate: s.blockGlobalUpdate,
+ dialect: newDialect(s.dialect.GetName(), s.db),
+ nowFuncOverride: s.nowFuncOverride,
+ }
+
+ s.values.Range(func(k, v interface{}) bool {
+ db.values.Store(k, v)
+ return true
+ })
+
+ if s.search == nil {
+ db.search = &search{limit: -1, offset: -1}
+ } else {
+ db.search = s.search.clone()
+ }
+
+ db.search.db = db
+ return db
+}
+
+func (s *DB) print(v ...interface{}) {
+ s.logger.Print(v...)
+}
+
+func (s *DB) log(v ...interface{}) {
+ if s != nil && s.logMode == detailedLogMode {
+ s.print(append([]interface{}{"log", fileWithLineNum()}, v...)...)
+ }
+}
+
+func (s *DB) slog(sql string, t time.Time, vars ...interface{}) {
+ if s.logMode == detailedLogMode {
+ s.print("sql", fileWithLineNum(), NowFunc().Sub(t), sql, vars, s.RowsAffected)
+ }
+}
diff --git a/vendor/github.com/jinzhu/gorm/model.go b/vendor/github.com/jinzhu/gorm/model.go
new file mode 100644
index 0000000..f37ff7e
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/model.go
@@ -0,0 +1,14 @@
+package gorm
+
+import "time"
+
+// Model is the base model definition, including the fields `ID`, `CreatedAt`, `UpdatedAt`, `DeletedAt`, which can be embedded in your models, e.g.
+// type User struct {
+// gorm.Model
+// }
+type Model struct {
+ ID uint `gorm:"primary_key"`
+ CreatedAt time.Time
+ UpdatedAt time.Time
+ DeletedAt *time.Time `sql:"index"`
+}
diff --git a/vendor/github.com/jinzhu/gorm/model_struct.go b/vendor/github.com/jinzhu/gorm/model_struct.go
new file mode 100644
index 0000000..57dbec3
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/model_struct.go
@@ -0,0 +1,677 @@
+package gorm
+
+import (
+ "database/sql"
+ "errors"
+ "go/ast"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/jinzhu/inflection"
+)
+
+// DefaultTableNameHandler is the default table name handler
+var DefaultTableNameHandler = func(db *DB, defaultTableName string) string {
+ return defaultTableName
+}
+
+// lock for mutating global cached model metadata
+var structsLock sync.Mutex
+
+// global cache of model metadata
+var modelStructsMap sync.Map
+
+// ModelStruct model definition
+type ModelStruct struct {
+ PrimaryFields []*StructField
+ StructFields []*StructField
+ ModelType reflect.Type
+
+ defaultTableName string
+ l sync.Mutex
+}
+
+// TableName returns model's table name
+func (s *ModelStruct) TableName(db *DB) string {
+ s.l.Lock()
+ defer s.l.Unlock()
+
+ if s.defaultTableName == "" && db != nil && s.ModelType != nil {
+ // Set default table name
+ if tabler, ok := reflect.New(s.ModelType).Interface().(tabler); ok {
+ s.defaultTableName = tabler.TableName()
+ } else {
+ tableName := ToTableName(s.ModelType.Name())
+ db.parent.RLock()
+ if db == nil || (db.parent != nil && !db.parent.singularTable) {
+ tableName = inflection.Plural(tableName)
+ }
+ db.parent.RUnlock()
+ s.defaultTableName = tableName
+ }
+ }
+
+ return DefaultTableNameHandler(db, s.defaultTableName)
+}
+
+// StructField model field's struct definition
+type StructField struct {
+ DBName string
+ Name string
+ Names []string
+ IsPrimaryKey bool
+ IsNormal bool
+ IsIgnored bool
+ IsScanner bool
+ HasDefaultValue bool
+ Tag reflect.StructTag
+ TagSettings map[string]string
+ Struct reflect.StructField
+ IsForeignKey bool
+ Relationship *Relationship
+
+ tagSettingsLock sync.RWMutex
+}
+
+// TagSettingsSet sets a tag in the tag settings map
+func (sf *StructField) TagSettingsSet(key, val string) {
+ sf.tagSettingsLock.Lock()
+ defer sf.tagSettingsLock.Unlock()
+ sf.TagSettings[key] = val
+}
+
+// TagSettingsGet returns a tag from the tag settings
+func (sf *StructField) TagSettingsGet(key string) (string, bool) {
+ sf.tagSettingsLock.RLock()
+ defer sf.tagSettingsLock.RUnlock()
+ val, ok := sf.TagSettings[key]
+ return val, ok
+}
+
+// TagSettingsDelete deletes a tag
+func (sf *StructField) TagSettingsDelete(key string) {
+ sf.tagSettingsLock.Lock()
+ defer sf.tagSettingsLock.Unlock()
+ delete(sf.TagSettings, key)
+}
+
+func (sf *StructField) clone() *StructField {
+ clone := &StructField{
+ DBName: sf.DBName,
+ Name: sf.Name,
+ Names: sf.Names,
+ IsPrimaryKey: sf.IsPrimaryKey,
+ IsNormal: sf.IsNormal,
+ IsIgnored: sf.IsIgnored,
+ IsScanner: sf.IsScanner,
+ HasDefaultValue: sf.HasDefaultValue,
+ Tag: sf.Tag,
+ TagSettings: map[string]string{},
+ Struct: sf.Struct,
+ IsForeignKey: sf.IsForeignKey,
+ }
+
+ if sf.Relationship != nil {
+ relationship := *sf.Relationship
+ clone.Relationship = &relationship
+ }
+
+ // copy the struct field tagSettings, they should be read-locked while they are copied
+ sf.tagSettingsLock.Lock()
+ defer sf.tagSettingsLock.Unlock()
+ for key, value := range sf.TagSettings {
+ clone.TagSettings[key] = value
+ }
+
+ return clone
+}
+
+// Relationship describes the relationship between models
+type Relationship struct {
+ Kind string
+ PolymorphicType string
+ PolymorphicDBName string
+ PolymorphicValue string
+ ForeignFieldNames []string
+ ForeignDBNames []string
+ AssociationForeignFieldNames []string
+ AssociationForeignDBNames []string
+ JoinTableHandler JoinTableHandlerInterface
+}
+
+func getForeignField(column string, fields []*StructField) *StructField {
+ for _, field := range fields {
+ if field.Name == column || field.DBName == column || field.DBName == ToColumnName(column) {
+ return field
+ }
+ }
+ return nil
+}
+
+// GetModelStruct gets the value's model struct, with relationships based on struct and tag definitions
+func (scope *Scope) GetModelStruct() *ModelStruct {
+ return scope.getModelStruct(scope, make([]*StructField, 0))
+}
+
+func (scope *Scope) getModelStruct(rootScope *Scope, allFields []*StructField) *ModelStruct {
+ var modelStruct ModelStruct
+ // Scope value can't be nil
+ if scope.Value == nil {
+ return &modelStruct
+ }
+
+ reflectType := reflect.ValueOf(scope.Value).Type()
+ for reflectType.Kind() == reflect.Slice || reflectType.Kind() == reflect.Ptr {
+ reflectType = reflectType.Elem()
+ }
+
+ // Scope value need to be a struct
+ if reflectType.Kind() != reflect.Struct {
+ return &modelStruct
+ }
+
+ // Get Cached model struct
+ isSingularTable := false
+ if scope.db != nil && scope.db.parent != nil {
+ scope.db.parent.RLock()
+ isSingularTable = scope.db.parent.singularTable
+ scope.db.parent.RUnlock()
+ }
+
+ hashKey := struct {
+ singularTable bool
+ reflectType reflect.Type
+ }{isSingularTable, reflectType}
+ if value, ok := modelStructsMap.Load(hashKey); ok && value != nil {
+ return value.(*ModelStruct)
+ }
+
+ modelStruct.ModelType = reflectType
+
+ // Get all fields
+ for i := 0; i < reflectType.NumField(); i++ {
+ if fieldStruct := reflectType.Field(i); ast.IsExported(fieldStruct.Name) {
+ field := &StructField{
+ Struct: fieldStruct,
+ Name: fieldStruct.Name,
+ Names: []string{fieldStruct.Name},
+ Tag: fieldStruct.Tag,
+ TagSettings: parseTagSetting(fieldStruct.Tag),
+ }
+
+ // is ignored field
+ if _, ok := field.TagSettingsGet("-"); ok {
+ field.IsIgnored = true
+ } else {
+ if _, ok := field.TagSettingsGet("PRIMARY_KEY"); ok {
+ field.IsPrimaryKey = true
+ modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field)
+ }
+
+ if _, ok := field.TagSettingsGet("DEFAULT"); ok && !field.IsPrimaryKey {
+ field.HasDefaultValue = true
+ }
+
+ if _, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok && !field.IsPrimaryKey {
+ field.HasDefaultValue = true
+ }
+
+ indirectType := fieldStruct.Type
+ for indirectType.Kind() == reflect.Ptr {
+ indirectType = indirectType.Elem()
+ }
+
+ fieldValue := reflect.New(indirectType).Interface()
+ if _, isScanner := fieldValue.(sql.Scanner); isScanner {
+ // is scanner
+ field.IsScanner, field.IsNormal = true, true
+ if indirectType.Kind() == reflect.Struct {
+ for i := 0; i < indirectType.NumField(); i++ {
+ for key, value := range parseTagSetting(indirectType.Field(i).Tag) {
+ if _, ok := field.TagSettingsGet(key); !ok {
+ field.TagSettingsSet(key, value)
+ }
+ }
+ }
+ }
+ } else if _, isTime := fieldValue.(*time.Time); isTime {
+ // is time
+ field.IsNormal = true
+ } else if _, ok := field.TagSettingsGet("EMBEDDED"); ok || fieldStruct.Anonymous {
+ // is embedded struct
+ for _, subField := range scope.New(fieldValue).getModelStruct(rootScope, allFields).StructFields {
+ subField = subField.clone()
+ subField.Names = append([]string{fieldStruct.Name}, subField.Names...)
+ if prefix, ok := field.TagSettingsGet("EMBEDDED_PREFIX"); ok {
+ subField.DBName = prefix + subField.DBName
+ }
+
+ if subField.IsPrimaryKey {
+ if _, ok := subField.TagSettingsGet("PRIMARY_KEY"); ok {
+ modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, subField)
+ } else {
+ subField.IsPrimaryKey = false
+ }
+ }
+
+ if subField.Relationship != nil && subField.Relationship.JoinTableHandler != nil {
+ if joinTableHandler, ok := subField.Relationship.JoinTableHandler.(*JoinTableHandler); ok {
+ newJoinTableHandler := &JoinTableHandler{}
+ newJoinTableHandler.Setup(subField.Relationship, joinTableHandler.TableName, reflectType, joinTableHandler.Destination.ModelType)
+ subField.Relationship.JoinTableHandler = newJoinTableHandler
+ }
+ }
+
+ modelStruct.StructFields = append(modelStruct.StructFields, subField)
+ allFields = append(allFields, subField)
+ }
+ continue
+ } else {
+ // build relationships
+ switch indirectType.Kind() {
+ case reflect.Slice:
+ defer func(field *StructField) {
+ var (
+ relationship = &Relationship{}
+ toScope = scope.New(reflect.New(field.Struct.Type).Interface())
+ foreignKeys []string
+ associationForeignKeys []string
+ elemType = field.Struct.Type
+ )
+
+ if foreignKey, _ := field.TagSettingsGet("FOREIGNKEY"); foreignKey != "" {
+ foreignKeys = strings.Split(foreignKey, ",")
+ }
+
+ if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_FOREIGNKEY"); foreignKey != "" {
+ associationForeignKeys = strings.Split(foreignKey, ",")
+ } else if foreignKey, _ := field.TagSettingsGet("ASSOCIATIONFOREIGNKEY"); foreignKey != "" {
+ associationForeignKeys = strings.Split(foreignKey, ",")
+ }
+
+ for elemType.Kind() == reflect.Slice || elemType.Kind() == reflect.Ptr {
+ elemType = elemType.Elem()
+ }
+
+ if elemType.Kind() == reflect.Struct {
+ if many2many, _ := field.TagSettingsGet("MANY2MANY"); many2many != "" {
+ relationship.Kind = "many_to_many"
+
+ { // Foreign Keys for Source
+ joinTableDBNames := []string{}
+
+ if foreignKey, _ := field.TagSettingsGet("JOINTABLE_FOREIGNKEY"); foreignKey != "" {
+ joinTableDBNames = strings.Split(foreignKey, ",")
+ }
+
+ // if no foreign keys defined with tag
+ if len(foreignKeys) == 0 {
+ for _, field := range modelStruct.PrimaryFields {
+ foreignKeys = append(foreignKeys, field.DBName)
+ }
+ }
+
+ for idx, foreignKey := range foreignKeys {
+ if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil {
+ // source foreign keys (db names)
+ relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.DBName)
+
+ // setup join table foreign keys for source
+ if len(joinTableDBNames) > idx {
+ // if defined join table's foreign key
+ relationship.ForeignDBNames = append(relationship.ForeignDBNames, joinTableDBNames[idx])
+ } else {
+ defaultJointableForeignKey := ToColumnName(reflectType.Name()) + "_" + foreignField.DBName
+ relationship.ForeignDBNames = append(relationship.ForeignDBNames, defaultJointableForeignKey)
+ }
+ }
+ }
+ }
+
+ { // Foreign Keys for Association (Destination)
+ associationJoinTableDBNames := []string{}
+
+ if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_JOINTABLE_FOREIGNKEY"); foreignKey != "" {
+ associationJoinTableDBNames = strings.Split(foreignKey, ",")
+ }
+
+ // if no association foreign keys defined with tag
+ if len(associationForeignKeys) == 0 {
+ for _, field := range toScope.PrimaryFields() {
+ associationForeignKeys = append(associationForeignKeys, field.DBName)
+ }
+ }
+
+ for idx, name := range associationForeignKeys {
+ if field, ok := toScope.FieldByName(name); ok {
+ // association foreign keys (db names)
+ relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, field.DBName)
+
+ // setup join table foreign keys for association
+ if len(associationJoinTableDBNames) > idx {
+ relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationJoinTableDBNames[idx])
+ } else {
+ // join table foreign keys for association
+ joinTableDBName := ToColumnName(elemType.Name()) + "_" + field.DBName
+ relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, joinTableDBName)
+ }
+ }
+ }
+ }
+
+ joinTableHandler := JoinTableHandler{}
+ joinTableHandler.Setup(relationship, many2many, reflectType, elemType)
+ relationship.JoinTableHandler = &joinTableHandler
+ field.Relationship = relationship
+ } else {
+ // User has many comments, associationType is User, comment use UserID as foreign key
+ var associationType = reflectType.Name()
+ var toFields = toScope.GetStructFields()
+ relationship.Kind = "has_many"
+
+ if polymorphic, _ := field.TagSettingsGet("POLYMORPHIC"); polymorphic != "" {
+ // Dog has many toys, tag polymorphic is Owner, then associationType is Owner
+ // Toy use OwnerID, OwnerType ('dogs') as foreign key
+ if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil {
+ associationType = polymorphic
+ relationship.PolymorphicType = polymorphicType.Name
+ relationship.PolymorphicDBName = polymorphicType.DBName
+ // if Dog has multiple set of toys set name of the set (instead of default 'dogs')
+ if value, ok := field.TagSettingsGet("POLYMORPHIC_VALUE"); ok {
+ relationship.PolymorphicValue = value
+ } else {
+ relationship.PolymorphicValue = scope.TableName()
+ }
+ polymorphicType.IsForeignKey = true
+ }
+ }
+
+ // if no foreign keys defined with tag
+ if len(foreignKeys) == 0 {
+ // if no association foreign keys defined with tag
+ if len(associationForeignKeys) == 0 {
+ for _, field := range modelStruct.PrimaryFields {
+ foreignKeys = append(foreignKeys, associationType+field.Name)
+ associationForeignKeys = append(associationForeignKeys, field.Name)
+ }
+ } else {
+ // generate foreign keys from defined association foreign keys
+ for _, scopeFieldName := range associationForeignKeys {
+ if foreignField := getForeignField(scopeFieldName, allFields); foreignField != nil {
+ foreignKeys = append(foreignKeys, associationType+foreignField.Name)
+ associationForeignKeys = append(associationForeignKeys, foreignField.Name)
+ }
+ }
+ }
+ } else {
+ // generate association foreign keys from foreign keys
+ if len(associationForeignKeys) == 0 {
+ for _, foreignKey := range foreignKeys {
+ if strings.HasPrefix(foreignKey, associationType) {
+ associationForeignKey := strings.TrimPrefix(foreignKey, associationType)
+ if foreignField := getForeignField(associationForeignKey, allFields); foreignField != nil {
+ associationForeignKeys = append(associationForeignKeys, associationForeignKey)
+ }
+ }
+ }
+ if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 {
+ associationForeignKeys = []string{rootScope.PrimaryKey()}
+ }
+ } else if len(foreignKeys) != len(associationForeignKeys) {
+ scope.Err(errors.New("invalid foreign keys, should have same length"))
+ return
+ }
+ }
+
+ for idx, foreignKey := range foreignKeys {
+ if foreignField := getForeignField(foreignKey, toFields); foreignField != nil {
+ if associationField := getForeignField(associationForeignKeys[idx], allFields); associationField != nil {
+ // mark field as foreignkey, use global lock to avoid race
+ structsLock.Lock()
+ foreignField.IsForeignKey = true
+ structsLock.Unlock()
+
+ // association foreign keys
+ relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name)
+ relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName)
+
+ // association foreign keys
+ relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name)
+ relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName)
+ }
+ }
+ }
+
+ if len(relationship.ForeignFieldNames) != 0 {
+ field.Relationship = relationship
+ }
+ }
+ } else {
+ field.IsNormal = true
+ }
+ }(field)
+ case reflect.Struct:
+ defer func(field *StructField) {
+ var (
+ // user has one profile, associationType is User, profile use UserID as foreign key
+ // user belongs to profile, associationType is Profile, user use ProfileID as foreign key
+ associationType = reflectType.Name()
+ relationship = &Relationship{}
+ toScope = scope.New(reflect.New(field.Struct.Type).Interface())
+ toFields = toScope.GetStructFields()
+ tagForeignKeys []string
+ tagAssociationForeignKeys []string
+ )
+
+ if foreignKey, _ := field.TagSettingsGet("FOREIGNKEY"); foreignKey != "" {
+ tagForeignKeys = strings.Split(foreignKey, ",")
+ }
+
+ if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_FOREIGNKEY"); foreignKey != "" {
+ tagAssociationForeignKeys = strings.Split(foreignKey, ",")
+ } else if foreignKey, _ := field.TagSettingsGet("ASSOCIATIONFOREIGNKEY"); foreignKey != "" {
+ tagAssociationForeignKeys = strings.Split(foreignKey, ",")
+ }
+
+ if polymorphic, _ := field.TagSettingsGet("POLYMORPHIC"); polymorphic != "" {
+ // Cat has one toy, tag polymorphic is Owner, then associationType is Owner
+ // Toy use OwnerID, OwnerType ('cats') as foreign key
+ if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil {
+ associationType = polymorphic
+ relationship.PolymorphicType = polymorphicType.Name
+ relationship.PolymorphicDBName = polymorphicType.DBName
+ // if Cat has several different types of toys set name for each (instead of default 'cats')
+ if value, ok := field.TagSettingsGet("POLYMORPHIC_VALUE"); ok {
+ relationship.PolymorphicValue = value
+ } else {
+ relationship.PolymorphicValue = scope.TableName()
+ }
+ polymorphicType.IsForeignKey = true
+ }
+ }
+
+ // Has One
+ {
+ var foreignKeys = tagForeignKeys
+ var associationForeignKeys = tagAssociationForeignKeys
+ // if no foreign keys defined with tag
+ if len(foreignKeys) == 0 {
+ // if no association foreign keys defined with tag
+ if len(associationForeignKeys) == 0 {
+ for _, primaryField := range modelStruct.PrimaryFields {
+ foreignKeys = append(foreignKeys, associationType+primaryField.Name)
+ associationForeignKeys = append(associationForeignKeys, primaryField.Name)
+ }
+ } else {
+ // generate foreign keys form association foreign keys
+ for _, associationForeignKey := range tagAssociationForeignKeys {
+ if foreignField := getForeignField(associationForeignKey, allFields); foreignField != nil {
+ foreignKeys = append(foreignKeys, associationType+foreignField.Name)
+ associationForeignKeys = append(associationForeignKeys, foreignField.Name)
+ }
+ }
+ }
+ } else {
+ // generate association foreign keys from foreign keys
+ if len(associationForeignKeys) == 0 {
+ for _, foreignKey := range foreignKeys {
+ if strings.HasPrefix(foreignKey, associationType) {
+ associationForeignKey := strings.TrimPrefix(foreignKey, associationType)
+ if foreignField := getForeignField(associationForeignKey, allFields); foreignField != nil {
+ associationForeignKeys = append(associationForeignKeys, associationForeignKey)
+ }
+ }
+ }
+ if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 {
+ associationForeignKeys = []string{rootScope.PrimaryKey()}
+ }
+ } else if len(foreignKeys) != len(associationForeignKeys) {
+ scope.Err(errors.New("invalid foreign keys, should have same length"))
+ return
+ }
+ }
+
+ for idx, foreignKey := range foreignKeys {
+ if foreignField := getForeignField(foreignKey, toFields); foreignField != nil {
+ if scopeField := getForeignField(associationForeignKeys[idx], allFields); scopeField != nil {
+ // mark field as foreignkey, use global lock to avoid race
+ structsLock.Lock()
+ foreignField.IsForeignKey = true
+ structsLock.Unlock()
+
+ // association foreign keys
+ relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, scopeField.Name)
+ relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, scopeField.DBName)
+
+ // association foreign keys
+ relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name)
+ relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName)
+ }
+ }
+ }
+ }
+
+ if len(relationship.ForeignFieldNames) != 0 {
+ relationship.Kind = "has_one"
+ field.Relationship = relationship
+ } else {
+ var foreignKeys = tagForeignKeys
+ var associationForeignKeys = tagAssociationForeignKeys
+
+ if len(foreignKeys) == 0 {
+ // generate foreign keys & association foreign keys
+ if len(associationForeignKeys) == 0 {
+ for _, primaryField := range toScope.PrimaryFields() {
+ foreignKeys = append(foreignKeys, field.Name+primaryField.Name)
+ associationForeignKeys = append(associationForeignKeys, primaryField.Name)
+ }
+ } else {
+ // generate foreign keys with association foreign keys
+ for _, associationForeignKey := range associationForeignKeys {
+ if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil {
+ foreignKeys = append(foreignKeys, field.Name+foreignField.Name)
+ associationForeignKeys = append(associationForeignKeys, foreignField.Name)
+ }
+ }
+ }
+ } else {
+ // generate foreign keys & association foreign keys
+ if len(associationForeignKeys) == 0 {
+ for _, foreignKey := range foreignKeys {
+ if strings.HasPrefix(foreignKey, field.Name) {
+ associationForeignKey := strings.TrimPrefix(foreignKey, field.Name)
+ if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil {
+ associationForeignKeys = append(associationForeignKeys, associationForeignKey)
+ }
+ }
+ }
+ if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 {
+ associationForeignKeys = []string{toScope.PrimaryKey()}
+ }
+ } else if len(foreignKeys) != len(associationForeignKeys) {
+ scope.Err(errors.New("invalid foreign keys, should have same length"))
+ return
+ }
+ }
+
+ for idx, foreignKey := range foreignKeys {
+ if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil {
+ if associationField := getForeignField(associationForeignKeys[idx], toFields); associationField != nil {
+ // mark field as foreignkey, use global lock to avoid race
+ structsLock.Lock()
+ foreignField.IsForeignKey = true
+ structsLock.Unlock()
+
+ // association foreign keys
+ relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name)
+ relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName)
+
+ // source foreign keys
+ relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name)
+ relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName)
+ }
+ }
+ }
+
+ if len(relationship.ForeignFieldNames) != 0 {
+ relationship.Kind = "belongs_to"
+ field.Relationship = relationship
+ }
+ }
+ }(field)
+ default:
+ field.IsNormal = true
+ }
+ }
+ }
+
+ // Even it is ignored, also possible to decode db value into the field
+ if value, ok := field.TagSettingsGet("COLUMN"); ok {
+ field.DBName = value
+ } else {
+ field.DBName = ToColumnName(fieldStruct.Name)
+ }
+
+ modelStruct.StructFields = append(modelStruct.StructFields, field)
+ allFields = append(allFields, field)
+ }
+ }
+
+ if len(modelStruct.PrimaryFields) == 0 {
+ if field := getForeignField("id", modelStruct.StructFields); field != nil {
+ field.IsPrimaryKey = true
+ modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field)
+ }
+ }
+
+ modelStructsMap.Store(hashKey, &modelStruct)
+
+ return &modelStruct
+}
+
+// GetStructFields gets the model's field structs
+func (scope *Scope) GetStructFields() (fields []*StructField) {
+ return scope.GetModelStruct().StructFields
+}
+
+func parseTagSetting(tags reflect.StructTag) map[string]string {
+ setting := map[string]string{}
+ for _, str := range []string{tags.Get("sql"), tags.Get("gorm")} {
+ if str == "" {
+ continue
+ }
+ tags := strings.Split(str, ";")
+ for _, value := range tags {
+ v := strings.Split(value, ":")
+ k := strings.TrimSpace(strings.ToUpper(v[0]))
+ if len(v) >= 2 {
+ setting[k] = strings.Join(v[1:], ":")
+ } else {
+ setting[k] = k
+ }
+ }
+ }
+ return setting
+}
diff --git a/vendor/github.com/jinzhu/gorm/naming.go b/vendor/github.com/jinzhu/gorm/naming.go
new file mode 100644
index 0000000..6b0a4fd
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/naming.go
@@ -0,0 +1,124 @@
+package gorm
+
+import (
+ "bytes"
+ "strings"
+)
+
+// Namer is a function type which takes a string and returns a string
+type Namer func(string) string
+
+// NamingStrategy represents naming strategies
+type NamingStrategy struct {
+ DB Namer
+ Table Namer
+ Column Namer
+}
+
+// TheNamingStrategy is initialized with the default naming strategy
+var TheNamingStrategy = &NamingStrategy{
+ DB: defaultNamer,
+ Table: defaultNamer,
+ Column: defaultNamer,
+}
+
+// AddNamingStrategy sets the naming strategy
+func AddNamingStrategy(ns *NamingStrategy) {
+ if ns.DB == nil {
+ ns.DB = defaultNamer
+ }
+ if ns.Table == nil {
+ ns.Table = defaultNamer
+ }
+ if ns.Column == nil {
+ ns.Column = defaultNamer
+ }
+ TheNamingStrategy = ns
+}
+
+// DBName alters the given name by DB
+func (ns *NamingStrategy) DBName(name string) string {
+ return ns.DB(name)
+}
+
+// TableName alters the given name by Table
+func (ns *NamingStrategy) TableName(name string) string {
+ return ns.Table(name)
+}
+
+// ColumnName alters the given name by Column
+func (ns *NamingStrategy) ColumnName(name string) string {
+ return ns.Column(name)
+}
+
+// ToDBName converts a string to a db name
+func ToDBName(name string) string {
+ return TheNamingStrategy.DBName(name)
+}
+
+// ToTableName converts a string to a table name
+func ToTableName(name string) string {
+ return TheNamingStrategy.TableName(name)
+}
+
+// ToColumnName converts a string to a column name
+func ToColumnName(name string) string {
+ return TheNamingStrategy.ColumnName(name)
+}
+
+var smap = newSafeMap()
+
+func defaultNamer(name string) string {
+ const (
+ lower = false
+ upper = true
+ )
+
+ if v := smap.Get(name); v != "" {
+ return v
+ }
+
+ if name == "" {
+ return ""
+ }
+
+ var (
+ value = commonInitialismsReplacer.Replace(name)
+ buf = bytes.NewBufferString("")
+ lastCase, currCase, nextCase, nextNumber bool
+ )
+
+ for i, v := range value[:len(value)-1] {
+ nextCase = bool(value[i+1] >= 'A' && value[i+1] <= 'Z')
+ nextNumber = bool(value[i+1] >= '0' && value[i+1] <= '9')
+
+ if i > 0 {
+ if currCase == upper {
+ if lastCase == upper && (nextCase == upper || nextNumber == upper) {
+ buf.WriteRune(v)
+ } else {
+ if value[i-1] != '_' && value[i+1] != '_' {
+ buf.WriteRune('_')
+ }
+ buf.WriteRune(v)
+ }
+ } else {
+ buf.WriteRune(v)
+ if i == len(value)-2 && (nextCase == upper && nextNumber == lower) {
+ buf.WriteRune('_')
+ }
+ }
+ } else {
+ currCase = upper
+ buf.WriteRune(v)
+ }
+ lastCase = currCase
+ currCase = nextCase
+ }
+
+ buf.WriteByte(value[len(value)-1])
+
+ s := strings.ToLower(buf.String())
+ smap.Set(name, s)
+ return s
+}
diff --git a/vendor/github.com/jinzhu/gorm/scope.go b/vendor/github.com/jinzhu/gorm/scope.go
new file mode 100644
index 0000000..56c3d6e
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/scope.go
@@ -0,0 +1,1425 @@
+package gorm
+
+import (
+ "bytes"
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+ "time"
+)
+
+// Scope contains the current operation's information when you perform any operation on the database
+type Scope struct {
+ Search *search
+ Value interface{}
+ SQL string
+ SQLVars []interface{}
+ db *DB
+ instanceID string
+ primaryKeyField *Field
+ skipLeft bool
+ fields *[]*Field
+ selectAttrs *[]string
+}
+
+// IndirectValue returns the scope's reflect value's indirect value
+func (scope *Scope) IndirectValue() reflect.Value {
+ return indirect(reflect.ValueOf(scope.Value))
+}
+
+// New creates a new Scope without search information
+func (scope *Scope) New(value interface{}) *Scope {
+ return &Scope{db: scope.NewDB(), Search: &search{}, Value: value}
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Scope DB
+////////////////////////////////////////////////////////////////////////////////
+
+// DB returns the scope's DB connection
+func (scope *Scope) DB() *DB {
+ return scope.db
+}
+
+// NewDB creates a new DB without search information
+func (scope *Scope) NewDB() *DB {
+ if scope.db != nil {
+ db := scope.db.clone()
+ db.search = nil
+ db.Value = nil
+ return db
+ }
+ return nil
+}
+
+// SQLDB returns the underlying SQLCommon connection (e.g. *sql.DB or *sql.Tx)
+func (scope *Scope) SQLDB() SQLCommon {
+ return scope.db.db
+}
+
+// Dialect returns the dialect in use
+func (scope *Scope) Dialect() Dialect {
+ return scope.db.dialect
+}
+
+// Quote quotes a string to escape it for the database
+func (scope *Scope) Quote(str string) string {
+ if strings.Contains(str, ".") {
+ newStrs := []string{}
+ for _, str := range strings.Split(str, ".") {
+ newStrs = append(newStrs, scope.Dialect().Quote(str))
+ }
+ return strings.Join(newStrs, ".")
+ }
+
+ return scope.Dialect().Quote(str)
+}
+
+// Err adds an error to the Scope
+func (scope *Scope) Err(err error) error {
+ if err != nil {
+ scope.db.AddError(err)
+ }
+ return err
+}
+
+// HasError checks whether any error has occurred
+func (scope *Scope) HasError() bool {
+ return scope.db.Error != nil
+}
+
+// Log prints a log message
+func (scope *Scope) Log(v ...interface{}) {
+ scope.db.log(v...)
+}
+
+// SkipLeft skips the remaining callbacks
+func (scope *Scope) SkipLeft() {
+ scope.skipLeft = true
+}
+
+// Fields gets the value's fields
+func (scope *Scope) Fields() []*Field {
+ if scope.fields == nil {
+ var (
+ fields []*Field
+ indirectScopeValue = scope.IndirectValue()
+ isStruct = indirectScopeValue.Kind() == reflect.Struct
+ )
+
+ for _, structField := range scope.GetModelStruct().StructFields {
+ if isStruct {
+ fieldValue := indirectScopeValue
+ for _, name := range structField.Names {
+ if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() {
+ fieldValue.Set(reflect.New(fieldValue.Type().Elem()))
+ }
+ fieldValue = reflect.Indirect(fieldValue).FieldByName(name)
+ }
+ fields = append(fields, &Field{StructField: structField, Field: fieldValue, IsBlank: isBlank(fieldValue)})
+ } else {
+ fields = append(fields, &Field{StructField: structField, IsBlank: true})
+ }
+ }
+ scope.fields = &fields
+ }
+
+ return *scope.fields
+}
+
+// FieldByName finds a `gorm.Field` by field name or db name
+func (scope *Scope) FieldByName(name string) (field *Field, ok bool) {
+ var (
+ dbName = ToColumnName(name)
+ mostMatchedField *Field
+ )
+
+ for _, field := range scope.Fields() {
+ if field.Name == name || field.DBName == name {
+ return field, true
+ }
+ if field.DBName == dbName {
+ mostMatchedField = field
+ }
+ }
+ return mostMatchedField, mostMatchedField != nil
+}
+
+// PrimaryFields returns the scope's primary fields
+func (scope *Scope) PrimaryFields() (fields []*Field) {
+ for _, field := range scope.Fields() {
+ if field.IsPrimaryKey {
+ fields = append(fields, field)
+ }
+ }
+ return fields
+}
+
+// PrimaryField returns the scope's main primary field; if more than one primary field is defined, it returns the one with column name `id`, or the first one
+func (scope *Scope) PrimaryField() *Field {
+ if primaryFields := scope.GetModelStruct().PrimaryFields; len(primaryFields) > 0 {
+ if len(primaryFields) > 1 {
+ if field, ok := scope.FieldByName("id"); ok {
+ return field
+ }
+ }
+ return scope.PrimaryFields()[0]
+ }
+ return nil
+}
+
+// PrimaryKey gets the main primary field's db name
+func (scope *Scope) PrimaryKey() string {
+ if field := scope.PrimaryField(); field != nil {
+ return field.DBName
+ }
+ return ""
+}
+
+// PrimaryKeyZero checks whether the main primary field's value is blank
+func (scope *Scope) PrimaryKeyZero() bool {
+ field := scope.PrimaryField()
+ return field == nil || field.IsBlank
+}
+
+// PrimaryKeyValue gets the primary key's value
+func (scope *Scope) PrimaryKeyValue() interface{} {
+ if field := scope.PrimaryField(); field != nil && field.Field.IsValid() {
+ return field.Field.Interface()
+ }
+ return 0
+}
+
+// HasColumn checks whether the model has the given column
+func (scope *Scope) HasColumn(column string) bool {
+ for _, field := range scope.GetStructFields() {
+ if field.IsNormal && (field.Name == column || field.DBName == column) {
+ return true
+ }
+ }
+ return false
+}
+
+// SetColumn sets a column's value; column can be a field or a field's name/dbname
+func (scope *Scope) SetColumn(column interface{}, value interface{}) error {
+ var updateAttrs = map[string]interface{}{}
+ if attrs, ok := scope.InstanceGet("gorm:update_attrs"); ok {
+ updateAttrs = attrs.(map[string]interface{})
+ defer scope.InstanceSet("gorm:update_attrs", updateAttrs)
+ }
+
+ if field, ok := column.(*Field); ok {
+ updateAttrs[field.DBName] = value
+ return field.Set(value)
+ } else if name, ok := column.(string); ok {
+ var (
+ dbName = ToDBName(name)
+ mostMatchedField *Field
+ )
+ for _, field := range scope.Fields() {
+ if field.DBName == value {
+ updateAttrs[field.DBName] = value
+ return field.Set(value)
+ }
+ if !field.IsIgnored && ((field.DBName == dbName) || (field.Name == name && mostMatchedField == nil)) {
+ mostMatchedField = field
+ }
+ }
+
+ if mostMatchedField != nil {
+ updateAttrs[mostMatchedField.DBName] = value
+ return mostMatchedField.Set(value)
+ }
+ }
+ return errors.New("could not convert column to field")
+}
+
+// CallMethod calls the scope value's method; if the value is a slice, it calls each element's method one by one
+func (scope *Scope) CallMethod(methodName string) {
+ if scope.Value == nil {
+ return
+ }
+
+ if indirectScopeValue := scope.IndirectValue(); indirectScopeValue.Kind() == reflect.Slice {
+ for i := 0; i < indirectScopeValue.Len(); i++ {
+ scope.callMethod(methodName, indirectScopeValue.Index(i))
+ }
+ } else {
+ scope.callMethod(methodName, indirectScopeValue)
+ }
+}
+
+// AddToVars adds a value to the SQL vars; used to prevent SQL injection
+func (scope *Scope) AddToVars(value interface{}) string {
+ _, skipBindVar := scope.InstanceGet("skip_bindvar")
+
+ if expr, ok := value.(*SqlExpr); ok {
+ exp := expr.expr
+ for _, arg := range expr.args {
+ if skipBindVar {
+ scope.AddToVars(arg)
+ } else {
+ exp = strings.Replace(exp, "?", scope.AddToVars(arg), 1)
+ }
+ }
+ return exp
+ }
+
+ scope.SQLVars = append(scope.SQLVars, value)
+
+ if skipBindVar {
+ return "?"
+ }
+ return scope.Dialect().BindVar(len(scope.SQLVars))
+}
+
+// SelectAttrs return selected attributes
+func (scope *Scope) SelectAttrs() []string {
+ if scope.selectAttrs == nil {
+ attrs := []string{}
+ for _, value := range scope.Search.selects {
+ if str, ok := value.(string); ok {
+ attrs = append(attrs, str)
+ } else if strs, ok := value.([]string); ok {
+ attrs = append(attrs, strs...)
+ } else if strs, ok := value.([]interface{}); ok {
+ for _, str := range strs {
+ attrs = append(attrs, fmt.Sprintf("%v", str))
+ }
+ }
+ }
+ scope.selectAttrs = &attrs
+ }
+ return *scope.selectAttrs
+}
+
+// OmitAttrs return omitted attributes
+func (scope *Scope) OmitAttrs() []string {
+ return scope.Search.omits
+}
+
+type tabler interface {
+ TableName() string
+}
+
+type dbTabler interface {
+ TableName(*DB) string
+}
+
+// TableName return table name
+func (scope *Scope) TableName() string {
+ if scope.Search != nil && len(scope.Search.tableName) > 0 {
+ return scope.Search.tableName
+ }
+
+ if tabler, ok := scope.Value.(tabler); ok {
+ return tabler.TableName()
+ }
+
+ if tabler, ok := scope.Value.(dbTabler); ok {
+ return tabler.TableName(scope.db)
+ }
+
+ return scope.GetModelStruct().TableName(scope.db.Model(scope.Value))
+}
+
+// QuotedTableName return quoted table name
+func (scope *Scope) QuotedTableName() (name string) {
+ if scope.Search != nil && len(scope.Search.tableName) > 0 {
+ if strings.Contains(scope.Search.tableName, " ") {
+ return scope.Search.tableName
+ }
+ return scope.Quote(scope.Search.tableName)
+ }
+
+ return scope.Quote(scope.TableName())
+}
+
+// CombinedConditionSql return combined condition sql
+func (scope *Scope) CombinedConditionSql() string {
+ joinSQL := scope.joinsSQL()
+ whereSQL := scope.whereSQL()
+ if scope.Search.raw {
+ whereSQL = strings.TrimSuffix(strings.TrimPrefix(whereSQL, "WHERE ("), ")")
+ }
+ return joinSQL + whereSQL + scope.groupSQL() +
+ scope.havingSQL() + scope.orderSQL() + scope.limitAndOffsetSQL()
+}
+
+// Raw set raw sql
+func (scope *Scope) Raw(sql string) *Scope {
+ scope.SQL = strings.Replace(sql, "$$$", "?", -1)
+ return scope
+}
+
+// Exec perform generated SQL
+func (scope *Scope) Exec() *Scope {
+ defer scope.trace(NowFunc())
+
+ if !scope.HasError() {
+ if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil {
+ if count, err := result.RowsAffected(); scope.Err(err) == nil {
+ scope.db.RowsAffected = count
+ }
+ }
+ }
+ return scope
+}
+
+// Set set value by name
+func (scope *Scope) Set(name string, value interface{}) *Scope {
+ scope.db.InstantSet(name, value)
+ return scope
+}
+
+// Get get setting by name
+func (scope *Scope) Get(name string) (interface{}, bool) {
+ return scope.db.Get(name)
+}
+
+// InstanceID get InstanceID for scope
+func (scope *Scope) InstanceID() string {
+ if scope.instanceID == "" {
+ scope.instanceID = fmt.Sprintf("%v%v", &scope, &scope.db)
+ }
+ return scope.instanceID
+}
+
+// InstanceSet sets an instance setting for the current operation, but not for operations in callbacks, such as the saving-associations callback
+func (scope *Scope) InstanceSet(name string, value interface{}) *Scope {
+ return scope.Set(name+scope.InstanceID(), value)
+}
+
+// InstanceGet get instance setting from current operation
+func (scope *Scope) InstanceGet(name string) (interface{}, bool) {
+ return scope.Get(name + scope.InstanceID())
+}
+
+// Begin start a transaction
+func (scope *Scope) Begin() *Scope {
+ if db, ok := scope.SQLDB().(sqlDb); ok {
+ if tx, err := db.Begin(); scope.Err(err) == nil {
+ scope.db.db = interface{}(tx).(SQLCommon)
+ scope.InstanceSet("gorm:started_transaction", true)
+ }
+ }
+ return scope
+}
+
+// CommitOrRollback commits the current transaction if no error happened, otherwise rolls it back
+func (scope *Scope) CommitOrRollback() *Scope {
+ if _, ok := scope.InstanceGet("gorm:started_transaction"); ok {
+ if db, ok := scope.db.db.(sqlTx); ok {
+ if scope.HasError() {
+ db.Rollback()
+ } else {
+ scope.Err(db.Commit())
+ }
+ scope.db.db = scope.db.parent.db
+ }
+ }
+ return scope
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Private Methods For *gorm.Scope
+////////////////////////////////////////////////////////////////////////////////
+
+func (scope *Scope) callMethod(methodName string, reflectValue reflect.Value) {
+ // Only get address from non-pointer
+ if reflectValue.CanAddr() && reflectValue.Kind() != reflect.Ptr {
+ reflectValue = reflectValue.Addr()
+ }
+
+ if methodValue := reflectValue.MethodByName(methodName); methodValue.IsValid() {
+ switch method := methodValue.Interface().(type) {
+ case func():
+ method()
+ case func(*Scope):
+ method(scope)
+ case func(*DB):
+ newDB := scope.NewDB()
+ method(newDB)
+ scope.Err(newDB.Error)
+ case func() error:
+ scope.Err(method())
+ case func(*Scope) error:
+ scope.Err(method(scope))
+ case func(*DB) error:
+ newDB := scope.NewDB()
+ scope.Err(method(newDB))
+ scope.Err(newDB.Error)
+ default:
+ scope.Err(fmt.Errorf("unsupported function %v", methodName))
+ }
+ }
+}
+
+var (
+ columnRegexp = regexp.MustCompile("^[a-zA-Z\\d]+(\\.[a-zA-Z\\d]+)*$") // only match string like `name`, `users.name`
+ isNumberRegexp = regexp.MustCompile("^\\s*\\d+\\s*$") // match if string is number
+ comparisonRegexp = regexp.MustCompile("(?i) (=|<>|(>|<)(=?)|LIKE|IS|IN) ")
+ countingQueryRegexp = regexp.MustCompile("(?i)^count(.+)$")
+)
+
+func (scope *Scope) quoteIfPossible(str string) string {
+ if columnRegexp.MatchString(str) {
+ return scope.Quote(str)
+ }
+ return str
+}
+
+func (scope *Scope) scan(rows *sql.Rows, columns []string, fields []*Field) {
+ var (
+ ignored interface{}
+ values = make([]interface{}, len(columns))
+ selectFields []*Field
+ selectedColumnsMap = map[string]int{}
+ resetFields = map[int]*Field{}
+ )
+
+ for index, column := range columns {
+ values[index] = &ignored
+
+ selectFields = fields
+ offset := 0
+ if idx, ok := selectedColumnsMap[column]; ok {
+ offset = idx + 1
+ selectFields = selectFields[offset:]
+ }
+
+ for fieldIndex, field := range selectFields {
+ if field.DBName == column {
+ if field.Field.Kind() == reflect.Ptr {
+ values[index] = field.Field.Addr().Interface()
+ } else {
+ reflectValue := reflect.New(reflect.PtrTo(field.Struct.Type))
+ reflectValue.Elem().Set(field.Field.Addr())
+ values[index] = reflectValue.Interface()
+ resetFields[index] = field
+ }
+
+ selectedColumnsMap[column] = offset + fieldIndex
+
+ if field.IsNormal {
+ break
+ }
+ }
+ }
+ }
+
+ scope.Err(rows.Scan(values...))
+
+ for index, field := range resetFields {
+ if v := reflect.ValueOf(values[index]).Elem().Elem(); v.IsValid() {
+ field.Field.Set(v)
+ }
+ }
+}
+
+func (scope *Scope) primaryCondition(value interface{}) string {
+ return fmt.Sprintf("(%v.%v = %v)", scope.QuotedTableName(), scope.Quote(scope.PrimaryKey()), value)
+}
+
+func (scope *Scope) buildCondition(clause map[string]interface{}, include bool) (str string) {
+ var (
+ quotedTableName = scope.QuotedTableName()
+ quotedPrimaryKey = scope.Quote(scope.PrimaryKey())
+ equalSQL = "="
+ inSQL = "IN"
+ )
+
+ // If building not conditions
+ if !include {
+ equalSQL = "<>"
+ inSQL = "NOT IN"
+ }
+
+ switch value := clause["query"].(type) {
+ case sql.NullInt64:
+ return fmt.Sprintf("(%v.%v %s %v)", quotedTableName, quotedPrimaryKey, equalSQL, value.Int64)
+ case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+ return fmt.Sprintf("(%v.%v %s %v)", quotedTableName, quotedPrimaryKey, equalSQL, value)
+ case []int, []int8, []int16, []int32, []int64, []uint, []uint8, []uint16, []uint32, []uint64, []string, []interface{}:
+ if !include && reflect.ValueOf(value).Len() == 0 {
+ return
+ }
+ str = fmt.Sprintf("(%v.%v %s (?))", quotedTableName, quotedPrimaryKey, inSQL)
+ clause["args"] = []interface{}{value}
+ case string:
+ if isNumberRegexp.MatchString(value) {
+ return fmt.Sprintf("(%v.%v %s %v)", quotedTableName, quotedPrimaryKey, equalSQL, scope.AddToVars(value))
+ }
+
+ if value != "" {
+ if !include {
+ if comparisonRegexp.MatchString(value) {
+ str = fmt.Sprintf("NOT (%v)", value)
+ } else {
+ str = fmt.Sprintf("(%v.%v NOT IN (?))", quotedTableName, scope.Quote(value))
+ }
+ } else {
+ str = fmt.Sprintf("(%v)", value)
+ }
+ }
+ case map[string]interface{}:
+ var sqls []string
+ for key, value := range value {
+ if value != nil {
+ sqls = append(sqls, fmt.Sprintf("(%v.%v %s %v)", quotedTableName, scope.Quote(key), equalSQL, scope.AddToVars(value)))
+ } else {
+ if !include {
+ sqls = append(sqls, fmt.Sprintf("(%v.%v IS NOT NULL)", quotedTableName, scope.Quote(key)))
+ } else {
+ sqls = append(sqls, fmt.Sprintf("(%v.%v IS NULL)", quotedTableName, scope.Quote(key)))
+ }
+ }
+ }
+ return strings.Join(sqls, " AND ")
+ case interface{}:
+ var sqls []string
+ newScope := scope.New(value)
+
+ if len(newScope.Fields()) == 0 {
+ scope.Err(fmt.Errorf("invalid query condition: %v", value))
+ return
+ }
+ scopeQuotedTableName := newScope.QuotedTableName()
+ for _, field := range newScope.Fields() {
+ if !field.IsIgnored && !field.IsBlank && field.Relationship == nil {
+ sqls = append(sqls, fmt.Sprintf("(%v.%v %s %v)", scopeQuotedTableName, scope.Quote(field.DBName), equalSQL, scope.AddToVars(field.Field.Interface())))
+ }
+ }
+ return strings.Join(sqls, " AND ")
+ default:
+ scope.Err(fmt.Errorf("invalid query condition: %v", value))
+ return
+ }
+
+ replacements := []string{}
+ args := clause["args"].([]interface{})
+ for _, arg := range args {
+ var err error
+ switch reflect.ValueOf(arg).Kind() {
+ case reflect.Slice: // For where("id in (?)", []int64{1,2})
+ if scanner, ok := interface{}(arg).(driver.Valuer); ok {
+ arg, err = scanner.Value()
+ replacements = append(replacements, scope.AddToVars(arg))
+ } else if b, ok := arg.([]byte); ok {
+ replacements = append(replacements, scope.AddToVars(b))
+ } else if as, ok := arg.([][]interface{}); ok {
+ var tempMarks []string
+ for _, a := range as {
+ var arrayMarks []string
+ for _, v := range a {
+ arrayMarks = append(arrayMarks, scope.AddToVars(v))
+ }
+
+ if len(arrayMarks) > 0 {
+ tempMarks = append(tempMarks, fmt.Sprintf("(%v)", strings.Join(arrayMarks, ",")))
+ }
+ }
+
+ if len(tempMarks) > 0 {
+ replacements = append(replacements, strings.Join(tempMarks, ","))
+ }
+ } else if values := reflect.ValueOf(arg); values.Len() > 0 {
+ var tempMarks []string
+ for i := 0; i < values.Len(); i++ {
+ tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface()))
+ }
+ replacements = append(replacements, strings.Join(tempMarks, ","))
+ } else {
+ replacements = append(replacements, scope.AddToVars(Expr("NULL")))
+ }
+ default:
+ if valuer, ok := interface{}(arg).(driver.Valuer); ok {
+ arg, err = valuer.Value()
+ }
+
+ replacements = append(replacements, scope.AddToVars(arg))
+ }
+
+ if err != nil {
+ scope.Err(err)
+ }
+ }
+
+ buff := bytes.NewBuffer([]byte{})
+ i := 0
+ for _, s := range str {
+ if s == '?' && len(replacements) > i {
+ buff.WriteString(replacements[i])
+ i++
+ } else {
+ buff.WriteRune(s)
+ }
+ }
+
+ str = buff.String()
+
+ return
+}
+
+func (scope *Scope) buildSelectQuery(clause map[string]interface{}) (str string) {
+ switch value := clause["query"].(type) {
+ case string:
+ str = value
+ case []string:
+ str = strings.Join(value, ", ")
+ }
+
+ args := clause["args"].([]interface{})
+ replacements := []string{}
+ for _, arg := range args {
+ switch reflect.ValueOf(arg).Kind() {
+ case reflect.Slice:
+ values := reflect.ValueOf(arg)
+ var tempMarks []string
+ for i := 0; i < values.Len(); i++ {
+ tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface()))
+ }
+ replacements = append(replacements, strings.Join(tempMarks, ","))
+ default:
+ if valuer, ok := interface{}(arg).(driver.Valuer); ok {
+ arg, _ = valuer.Value()
+ }
+ replacements = append(replacements, scope.AddToVars(arg))
+ }
+ }
+
+ buff := bytes.NewBuffer([]byte{})
+ i := 0
+ for pos, char := range str {
+ if str[pos] == '?' {
+ buff.WriteString(replacements[i])
+ i++
+ } else {
+ buff.WriteRune(char)
+ }
+ }
+
+ str = buff.String()
+
+ return
+}
+
+func (scope *Scope) whereSQL() (sql string) {
+ var (
+ quotedTableName = scope.QuotedTableName()
+ deletedAtField, hasDeletedAtField = scope.FieldByName("DeletedAt")
+ primaryConditions, andConditions, orConditions []string
+ )
+
+ if !scope.Search.Unscoped && hasDeletedAtField {
+ sql := fmt.Sprintf("%v.%v IS NULL", quotedTableName, scope.Quote(deletedAtField.DBName))
+ primaryConditions = append(primaryConditions, sql)
+ }
+
+ if !scope.PrimaryKeyZero() {
+ for _, field := range scope.PrimaryFields() {
+ sql := fmt.Sprintf("%v.%v = %v", quotedTableName, scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface()))
+ primaryConditions = append(primaryConditions, sql)
+ }
+ }
+
+ for _, clause := range scope.Search.whereConditions {
+ if sql := scope.buildCondition(clause, true); sql != "" {
+ andConditions = append(andConditions, sql)
+ }
+ }
+
+ for _, clause := range scope.Search.orConditions {
+ if sql := scope.buildCondition(clause, true); sql != "" {
+ orConditions = append(orConditions, sql)
+ }
+ }
+
+ for _, clause := range scope.Search.notConditions {
+ if sql := scope.buildCondition(clause, false); sql != "" {
+ andConditions = append(andConditions, sql)
+ }
+ }
+
+ orSQL := strings.Join(orConditions, " OR ")
+ combinedSQL := strings.Join(andConditions, " AND ")
+ if len(combinedSQL) > 0 {
+ if len(orSQL) > 0 {
+ combinedSQL = combinedSQL + " OR " + orSQL
+ }
+ } else {
+ combinedSQL = orSQL
+ }
+
+ if len(primaryConditions) > 0 {
+ sql = "WHERE " + strings.Join(primaryConditions, " AND ")
+ if len(combinedSQL) > 0 {
+ sql = sql + " AND (" + combinedSQL + ")"
+ }
+ } else if len(combinedSQL) > 0 {
+ sql = "WHERE " + combinedSQL
+ }
+ return
+}
+
+func (scope *Scope) selectSQL() string {
+ if len(scope.Search.selects) == 0 {
+ if len(scope.Search.joinConditions) > 0 {
+ return fmt.Sprintf("%v.*", scope.QuotedTableName())
+ }
+ return "*"
+ }
+ return scope.buildSelectQuery(scope.Search.selects)
+}
+
+func (scope *Scope) orderSQL() string {
+ if len(scope.Search.orders) == 0 || scope.Search.ignoreOrderQuery {
+ return ""
+ }
+
+ var orders []string
+ for _, order := range scope.Search.orders {
+ if str, ok := order.(string); ok {
+ orders = append(orders, scope.quoteIfPossible(str))
+ } else if expr, ok := order.(*SqlExpr); ok {
+ exp := expr.expr
+ for _, arg := range expr.args {
+ exp = strings.Replace(exp, "?", scope.AddToVars(arg), 1)
+ }
+ orders = append(orders, exp)
+ }
+ }
+ return " ORDER BY " + strings.Join(orders, ",")
+}
+
+func (scope *Scope) limitAndOffsetSQL() string {
+ sql, err := scope.Dialect().LimitAndOffsetSQL(scope.Search.limit, scope.Search.offset)
+ scope.Err(err)
+ return sql
+}
+
+func (scope *Scope) groupSQL() string {
+ if len(scope.Search.group) == 0 {
+ return ""
+ }
+ return " GROUP BY " + scope.Search.group
+}
+
+func (scope *Scope) havingSQL() string {
+ if len(scope.Search.havingConditions) == 0 {
+ return ""
+ }
+
+ var andConditions []string
+ for _, clause := range scope.Search.havingConditions {
+ if sql := scope.buildCondition(clause, true); sql != "" {
+ andConditions = append(andConditions, sql)
+ }
+ }
+
+ combinedSQL := strings.Join(andConditions, " AND ")
+ if len(combinedSQL) == 0 {
+ return ""
+ }
+
+ return " HAVING " + combinedSQL
+}
+
+func (scope *Scope) joinsSQL() string {
+ var joinConditions []string
+ for _, clause := range scope.Search.joinConditions {
+ if sql := scope.buildCondition(clause, true); sql != "" {
+ joinConditions = append(joinConditions, strings.TrimSuffix(strings.TrimPrefix(sql, "("), ")"))
+ }
+ }
+
+ return strings.Join(joinConditions, " ") + " "
+}
+
+func (scope *Scope) prepareQuerySQL() {
+ if scope.Search.raw {
+ scope.Raw(scope.CombinedConditionSql())
+ } else {
+ scope.Raw(fmt.Sprintf("SELECT %v FROM %v %v", scope.selectSQL(), scope.QuotedTableName(), scope.CombinedConditionSql()))
+ }
+ return
+}
+
+func (scope *Scope) inlineCondition(values ...interface{}) *Scope {
+ if len(values) > 0 {
+ scope.Search.Where(values[0], values[1:]...)
+ }
+ return scope
+}
+
+func (scope *Scope) callCallbacks(funcs []*func(s *Scope)) *Scope {
+ defer func() {
+ if err := recover(); err != nil {
+ if db, ok := scope.db.db.(sqlTx); ok {
+ db.Rollback()
+ }
+ panic(err)
+ }
+ }()
+ for _, f := range funcs {
+ (*f)(scope)
+ if scope.skipLeft {
+ break
+ }
+ }
+ return scope
+}
+
+func convertInterfaceToMap(values interface{}, withIgnoredField bool, db *DB) map[string]interface{} {
+ var attrs = map[string]interface{}{}
+
+ switch value := values.(type) {
+ case map[string]interface{}:
+ return value
+ case []interface{}:
+ for _, v := range value {
+ for key, value := range convertInterfaceToMap(v, withIgnoredField, db) {
+ attrs[key] = value
+ }
+ }
+ case interface{}:
+ reflectValue := reflect.ValueOf(values)
+
+ switch reflectValue.Kind() {
+ case reflect.Map:
+ for _, key := range reflectValue.MapKeys() {
+ attrs[ToColumnName(key.Interface().(string))] = reflectValue.MapIndex(key).Interface()
+ }
+ default:
+ for _, field := range (&Scope{Value: values, db: db}).Fields() {
+ if !field.IsBlank && (withIgnoredField || !field.IsIgnored) {
+ attrs[field.DBName] = field.Field.Interface()
+ }
+ }
+ }
+ }
+ return attrs
+}
+
+func (scope *Scope) updatedAttrsWithValues(value interface{}) (results map[string]interface{}, hasUpdate bool) {
+ if scope.IndirectValue().Kind() != reflect.Struct {
+ return convertInterfaceToMap(value, false, scope.db), true
+ }
+
+ results = map[string]interface{}{}
+
+ for key, value := range convertInterfaceToMap(value, true, scope.db) {
+ if field, ok := scope.FieldByName(key); ok {
+ if scope.changeableField(field) {
+ if _, ok := value.(*SqlExpr); ok {
+ hasUpdate = true
+ results[field.DBName] = value
+ } else {
+ err := field.Set(value)
+ if field.IsNormal && !field.IsIgnored {
+ hasUpdate = true
+ if err == ErrUnaddressable {
+ results[field.DBName] = value
+ } else {
+ results[field.DBName] = field.Field.Interface()
+ }
+ }
+ }
+ }
+ } else {
+ results[key] = value
+ }
+ }
+ return
+}
+
+func (scope *Scope) row() *sql.Row {
+ defer scope.trace(NowFunc())
+
+ result := &RowQueryResult{}
+ scope.InstanceSet("row_query_result", result)
+ scope.callCallbacks(scope.db.parent.callbacks.rowQueries)
+
+ return result.Row
+}
+
+func (scope *Scope) rows() (*sql.Rows, error) {
+ defer scope.trace(NowFunc())
+
+ result := &RowsQueryResult{}
+ scope.InstanceSet("row_query_result", result)
+ scope.callCallbacks(scope.db.parent.callbacks.rowQueries)
+
+ return result.Rows, result.Error
+}
+
+func (scope *Scope) initialize() *Scope {
+ for _, clause := range scope.Search.whereConditions {
+ scope.updatedAttrsWithValues(clause["query"])
+ }
+ scope.updatedAttrsWithValues(scope.Search.initAttrs)
+ scope.updatedAttrsWithValues(scope.Search.assignAttrs)
+ return scope
+}
+
+func (scope *Scope) isQueryForColumn(query interface{}, column string) bool {
+ queryStr := strings.ToLower(fmt.Sprint(query))
+ if queryStr == column {
+ return true
+ }
+
+ if strings.HasSuffix(queryStr, "as "+column) {
+ return true
+ }
+
+ if strings.HasSuffix(queryStr, "as "+scope.Quote(column)) {
+ return true
+ }
+
+ return false
+}
+
+func (scope *Scope) pluck(column string, value interface{}) *Scope {
+ dest := reflect.Indirect(reflect.ValueOf(value))
+ if dest.Kind() != reflect.Slice {
+ scope.Err(fmt.Errorf("results should be a slice, not %s", dest.Kind()))
+ return scope
+ }
+
+ if dest.Len() > 0 {
+ dest.Set(reflect.Zero(dest.Type()))
+ }
+
+ if query, ok := scope.Search.selects["query"]; !ok || !scope.isQueryForColumn(query, column) {
+ scope.Search.Select(column)
+ }
+
+ rows, err := scope.rows()
+ if scope.Err(err) == nil {
+ defer rows.Close()
+ for rows.Next() {
+ elem := reflect.New(dest.Type().Elem()).Interface()
+ scope.Err(rows.Scan(elem))
+ dest.Set(reflect.Append(dest, reflect.ValueOf(elem).Elem()))
+ }
+
+ if err := rows.Err(); err != nil {
+ scope.Err(err)
+ }
+ }
+ return scope
+}
+
+func (scope *Scope) count(value interface{}) *Scope {
+ if query, ok := scope.Search.selects["query"]; !ok || !countingQueryRegexp.MatchString(fmt.Sprint(query)) {
+ if len(scope.Search.group) != 0 {
+ if len(scope.Search.havingConditions) != 0 {
+ scope.prepareQuerySQL()
+ scope.Search = &search{}
+ scope.Search.Select("count(*)")
+ scope.Search.Table(fmt.Sprintf("( %s ) AS count_table", scope.SQL))
+ } else {
+ scope.Search.Select("count(*) FROM ( SELECT count(*) as name ")
+ scope.Search.group += " ) AS count_table"
+ }
+ } else {
+ scope.Search.Select("count(*)")
+ }
+ }
+ scope.Search.ignoreOrderQuery = true
+ scope.Err(scope.row().Scan(value))
+ return scope
+}
+
+func (scope *Scope) typeName() string {
+ typ := scope.IndirectValue().Type()
+
+ for typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ }
+
+ return typ.Name()
+}
+
+// trace print sql log
+func (scope *Scope) trace(t time.Time) {
+ if len(scope.SQL) > 0 {
+ scope.db.slog(scope.SQL, t, scope.SQLVars...)
+ }
+}
+
+func (scope *Scope) changeableField(field *Field) bool {
+ if selectAttrs := scope.SelectAttrs(); len(selectAttrs) > 0 {
+ for _, attr := range selectAttrs {
+ if field.Name == attr || field.DBName == attr {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, attr := range scope.OmitAttrs() {
+ if field.Name == attr || field.DBName == attr {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (scope *Scope) related(value interface{}, foreignKeys ...string) *Scope {
+ toScope := scope.db.NewScope(value)
+ tx := scope.db.Set("gorm:association:source", scope.Value)
+
+ for _, foreignKey := range append(foreignKeys, toScope.typeName()+"Id", scope.typeName()+"Id") {
+ fromField, _ := scope.FieldByName(foreignKey)
+ toField, _ := toScope.FieldByName(foreignKey)
+
+ if fromField != nil {
+ if relationship := fromField.Relationship; relationship != nil {
+ if relationship.Kind == "many_to_many" {
+ joinTableHandler := relationship.JoinTableHandler
+ scope.Err(joinTableHandler.JoinWith(joinTableHandler, tx, scope.Value).Find(value).Error)
+ } else if relationship.Kind == "belongs_to" {
+ for idx, foreignKey := range relationship.ForeignDBNames {
+ if field, ok := scope.FieldByName(foreignKey); ok {
+ tx = tx.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.AssociationForeignDBNames[idx])), field.Field.Interface())
+ }
+ }
+ scope.Err(tx.Find(value).Error)
+ } else if relationship.Kind == "has_many" || relationship.Kind == "has_one" {
+ for idx, foreignKey := range relationship.ForeignDBNames {
+ if field, ok := scope.FieldByName(relationship.AssociationForeignDBNames[idx]); ok {
+ tx = tx.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface())
+ }
+ }
+
+ if relationship.PolymorphicType != "" {
+ tx = tx.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.PolymorphicDBName)), relationship.PolymorphicValue)
+ }
+ scope.Err(tx.Find(value).Error)
+ }
+ } else {
+ sql := fmt.Sprintf("%v = ?", scope.Quote(toScope.PrimaryKey()))
+ scope.Err(tx.Where(sql, fromField.Field.Interface()).Find(value).Error)
+ }
+ return scope
+ } else if toField != nil {
+ sql := fmt.Sprintf("%v = ?", scope.Quote(toField.DBName))
+ scope.Err(tx.Where(sql, scope.PrimaryKeyValue()).Find(value).Error)
+ return scope
+ }
+ }
+
+ scope.Err(fmt.Errorf("invalid association %v", foreignKeys))
+ return scope
+}
+
+// getTableOptions returns the table options string, or an empty string if no table options are set
+func (scope *Scope) getTableOptions() string {
+ tableOptions, ok := scope.Get("gorm:table_options")
+ if !ok {
+ return ""
+ }
+ return " " + tableOptions.(string)
+}
+
+func (scope *Scope) createJoinTable(field *StructField) {
+ if relationship := field.Relationship; relationship != nil && relationship.JoinTableHandler != nil {
+ joinTableHandler := relationship.JoinTableHandler
+ joinTable := joinTableHandler.Table(scope.db)
+ if !scope.Dialect().HasTable(joinTable) {
+ toScope := &Scope{Value: reflect.New(field.Struct.Type).Interface()}
+
+ var sqlTypes, primaryKeys []string
+ for idx, fieldName := range relationship.ForeignFieldNames {
+ if field, ok := scope.FieldByName(fieldName); ok {
+ foreignKeyStruct := field.clone()
+ foreignKeyStruct.IsPrimaryKey = false
+ foreignKeyStruct.TagSettingsSet("IS_JOINTABLE_FOREIGNKEY", "true")
+ foreignKeyStruct.TagSettingsDelete("AUTO_INCREMENT")
+ sqlTypes = append(sqlTypes, scope.Quote(relationship.ForeignDBNames[idx])+" "+scope.Dialect().DataTypeOf(foreignKeyStruct))
+ primaryKeys = append(primaryKeys, scope.Quote(relationship.ForeignDBNames[idx]))
+ }
+ }
+
+ for idx, fieldName := range relationship.AssociationForeignFieldNames {
+ if field, ok := toScope.FieldByName(fieldName); ok {
+ foreignKeyStruct := field.clone()
+ foreignKeyStruct.IsPrimaryKey = false
+ foreignKeyStruct.TagSettingsSet("IS_JOINTABLE_FOREIGNKEY", "true")
+ foreignKeyStruct.TagSettingsDelete("AUTO_INCREMENT")
+ sqlTypes = append(sqlTypes, scope.Quote(relationship.AssociationForeignDBNames[idx])+" "+scope.Dialect().DataTypeOf(foreignKeyStruct))
+ primaryKeys = append(primaryKeys, scope.Quote(relationship.AssociationForeignDBNames[idx]))
+ }
+ }
+
+ scope.Err(scope.NewDB().Exec(fmt.Sprintf("CREATE TABLE %v (%v, PRIMARY KEY (%v))%s", scope.Quote(joinTable), strings.Join(sqlTypes, ","), strings.Join(primaryKeys, ","), scope.getTableOptions())).Error)
+ }
+ scope.NewDB().Table(joinTable).AutoMigrate(joinTableHandler)
+ }
+}
+
+func (scope *Scope) createTable() *Scope {
+ var tags []string
+ var primaryKeys []string
+ var primaryKeyInColumnType = false
+ for _, field := range scope.GetModelStruct().StructFields {
+ if field.IsNormal {
+ sqlTag := scope.Dialect().DataTypeOf(field)
+
+ // Check if the primary key constraint was specified as
+ // part of the column type. If so, we can only support
+ // one column as the primary key.
+ if strings.Contains(strings.ToLower(sqlTag), "primary key") {
+ primaryKeyInColumnType = true
+ }
+
+ tags = append(tags, scope.Quote(field.DBName)+" "+sqlTag)
+ }
+
+ if field.IsPrimaryKey {
+ primaryKeys = append(primaryKeys, scope.Quote(field.DBName))
+ }
+ scope.createJoinTable(field)
+ }
+
+ var primaryKeyStr string
+ if len(primaryKeys) > 0 && !primaryKeyInColumnType {
+ primaryKeyStr = fmt.Sprintf(", PRIMARY KEY (%v)", strings.Join(primaryKeys, ","))
+ }
+
+ scope.Raw(fmt.Sprintf("CREATE TABLE %v (%v %v)%s", scope.QuotedTableName(), strings.Join(tags, ","), primaryKeyStr, scope.getTableOptions())).Exec()
+
+ scope.autoIndex()
+ return scope
+}
+
+func (scope *Scope) dropTable() *Scope {
+ scope.Raw(fmt.Sprintf("DROP TABLE %v", scope.QuotedTableName())).Exec()
+ return scope
+}
+
+func (scope *Scope) modifyColumn(column string, typ string) {
+ scope.db.AddError(scope.Dialect().ModifyColumn(scope.QuotedTableName(), scope.Quote(column), typ))
+}
+
+func (scope *Scope) dropColumn(column string) {
+ scope.Raw(fmt.Sprintf("ALTER TABLE %v DROP COLUMN %v", scope.QuotedTableName(), scope.Quote(column))).Exec()
+}
+
+func (scope *Scope) addIndex(unique bool, indexName string, column ...string) {
+ if scope.Dialect().HasIndex(scope.TableName(), indexName) {
+ return
+ }
+
+ var columns []string
+ for _, name := range column {
+ columns = append(columns, scope.quoteIfPossible(name))
+ }
+
+ sqlCreate := "CREATE INDEX"
+ if unique {
+ sqlCreate = "CREATE UNIQUE INDEX"
+ }
+
+ scope.Raw(fmt.Sprintf("%s %v ON %v(%v) %v", sqlCreate, indexName, scope.QuotedTableName(), strings.Join(columns, ", "), scope.whereSQL())).Exec()
+}
+
+func (scope *Scope) addForeignKey(field string, dest string, onDelete string, onUpdate string) {
+ // Compatible with old generated key
+ keyName := scope.Dialect().BuildKeyName(scope.TableName(), field, dest, "foreign")
+
+ if scope.Dialect().HasForeignKey(scope.TableName(), keyName) {
+ return
+ }
+ var query = `ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s ON DELETE %s ON UPDATE %s;`
+ scope.Raw(fmt.Sprintf(query, scope.QuotedTableName(), scope.quoteIfPossible(keyName), scope.quoteIfPossible(field), dest, onDelete, onUpdate)).Exec()
+}
+
+func (scope *Scope) removeForeignKey(field string, dest string) {
+ keyName := scope.Dialect().BuildKeyName(scope.TableName(), field, dest, "foreign")
+ if !scope.Dialect().HasForeignKey(scope.TableName(), keyName) {
+ return
+ }
+ var mysql mysql
+ var query string
+ if scope.Dialect().GetName() == mysql.GetName() {
+ query = `ALTER TABLE %s DROP FOREIGN KEY %s;`
+ } else {
+ query = `ALTER TABLE %s DROP CONSTRAINT %s;`
+ }
+
+ scope.Raw(fmt.Sprintf(query, scope.QuotedTableName(), scope.quoteIfPossible(keyName))).Exec()
+}
+
+func (scope *Scope) removeIndex(indexName string) {
+ scope.Dialect().RemoveIndex(scope.TableName(), indexName)
+}
+
+func (scope *Scope) autoMigrate() *Scope {
+ tableName := scope.TableName()
+ quotedTableName := scope.QuotedTableName()
+
+ if !scope.Dialect().HasTable(tableName) {
+ scope.createTable()
+ } else {
+ for _, field := range scope.GetModelStruct().StructFields {
+ if !scope.Dialect().HasColumn(tableName, field.DBName) {
+ if field.IsNormal {
+ sqlTag := scope.Dialect().DataTypeOf(field)
+ scope.Raw(fmt.Sprintf("ALTER TABLE %v ADD %v %v;", quotedTableName, scope.Quote(field.DBName), sqlTag)).Exec()
+ }
+ }
+ scope.createJoinTable(field)
+ }
+ scope.autoIndex()
+ }
+ return scope
+}
+
+func (scope *Scope) autoIndex() *Scope {
+ var indexes = map[string][]string{}
+ var uniqueIndexes = map[string][]string{}
+
+ for _, field := range scope.GetStructFields() {
+ if name, ok := field.TagSettingsGet("INDEX"); ok {
+ names := strings.Split(name, ",")
+
+ for _, name := range names {
+ if name == "INDEX" || name == "" {
+ name = scope.Dialect().BuildKeyName("idx", scope.TableName(), field.DBName)
+ }
+ name, column := scope.Dialect().NormalizeIndexAndColumn(name, field.DBName)
+ indexes[name] = append(indexes[name], column)
+ }
+ }
+
+ if name, ok := field.TagSettingsGet("UNIQUE_INDEX"); ok {
+ names := strings.Split(name, ",")
+
+ for _, name := range names {
+ if name == "UNIQUE_INDEX" || name == "" {
+ name = scope.Dialect().BuildKeyName("uix", scope.TableName(), field.DBName)
+ }
+ name, column := scope.Dialect().NormalizeIndexAndColumn(name, field.DBName)
+ uniqueIndexes[name] = append(uniqueIndexes[name], column)
+ }
+ }
+ }
+
+ for name, columns := range indexes {
+ if db := scope.NewDB().Table(scope.TableName()).Model(scope.Value).AddIndex(name, columns...); db.Error != nil {
+ scope.db.AddError(db.Error)
+ }
+ }
+
+ for name, columns := range uniqueIndexes {
+ if db := scope.NewDB().Table(scope.TableName()).Model(scope.Value).AddUniqueIndex(name, columns...); db.Error != nil {
+ scope.db.AddError(db.Error)
+ }
+ }
+
+ return scope
+}
+
+func (scope *Scope) getColumnAsArray(columns []string, values ...interface{}) (results [][]interface{}) {
+ resultMap := make(map[string][]interface{})
+ for _, value := range values {
+ indirectValue := indirect(reflect.ValueOf(value))
+
+ switch indirectValue.Kind() {
+ case reflect.Slice:
+ for i := 0; i < indirectValue.Len(); i++ {
+ var result []interface{}
+ var object = indirect(indirectValue.Index(i))
+ var hasValue = false
+ for _, column := range columns {
+ field := object.FieldByName(column)
+ if hasValue || !isBlank(field) {
+ hasValue = true
+ }
+ result = append(result, field.Interface())
+ }
+
+ if hasValue {
+ h := fmt.Sprint(result...)
+ if _, exist := resultMap[h]; !exist {
+ resultMap[h] = result
+ }
+ }
+ }
+ case reflect.Struct:
+ var result []interface{}
+ var hasValue = false
+ for _, column := range columns {
+ field := indirectValue.FieldByName(column)
+ if hasValue || !isBlank(field) {
+ hasValue = true
+ }
+ result = append(result, field.Interface())
+ }
+
+ if hasValue {
+ h := fmt.Sprint(result...)
+ if _, exist := resultMap[h]; !exist {
+ resultMap[h] = result
+ }
+ }
+ }
+ }
+ for _, v := range resultMap {
+ results = append(results, v)
+ }
+ return
+}
+
+func (scope *Scope) getColumnAsScope(column string) *Scope {
+ indirectScopeValue := scope.IndirectValue()
+
+ switch indirectScopeValue.Kind() {
+ case reflect.Slice:
+ if fieldStruct, ok := scope.GetModelStruct().ModelType.FieldByName(column); ok {
+ fieldType := fieldStruct.Type
+ if fieldType.Kind() == reflect.Slice || fieldType.Kind() == reflect.Ptr {
+ fieldType = fieldType.Elem()
+ }
+
+ resultsMap := map[interface{}]bool{}
+ results := reflect.New(reflect.SliceOf(reflect.PtrTo(fieldType))).Elem()
+
+ for i := 0; i < indirectScopeValue.Len(); i++ {
+ result := indirect(indirect(indirectScopeValue.Index(i)).FieldByName(column))
+
+ if result.Kind() == reflect.Slice {
+ for j := 0; j < result.Len(); j++ {
+ if elem := result.Index(j); elem.CanAddr() && resultsMap[elem.Addr()] != true {
+ resultsMap[elem.Addr()] = true
+ results = reflect.Append(results, elem.Addr())
+ }
+ }
+ } else if result.CanAddr() && resultsMap[result.Addr()] != true {
+ resultsMap[result.Addr()] = true
+ results = reflect.Append(results, result.Addr())
+ }
+ }
+ return scope.New(results.Interface())
+ }
+ case reflect.Struct:
+ if field := indirectScopeValue.FieldByName(column); field.CanAddr() {
+ return scope.New(field.Addr().Interface())
+ }
+ }
+ return nil
+}
+
+func (scope *Scope) hasConditions() bool {
+ return !scope.PrimaryKeyZero() ||
+ len(scope.Search.whereConditions) > 0 ||
+ len(scope.Search.orConditions) > 0 ||
+ len(scope.Search.notConditions) > 0
+}
diff --git a/vendor/github.com/jinzhu/gorm/search.go b/vendor/github.com/jinzhu/gorm/search.go
new file mode 100644
index 0000000..52ae2ef
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/search.go
@@ -0,0 +1,203 @@
+package gorm
+
+import (
+ "fmt"
+)
+
+type search struct {
+ db *DB
+ whereConditions []map[string]interface{}
+ orConditions []map[string]interface{}
+ notConditions []map[string]interface{}
+ havingConditions []map[string]interface{}
+ joinConditions []map[string]interface{}
+ initAttrs []interface{}
+ assignAttrs []interface{}
+ selects map[string]interface{}
+ omits []string
+ orders []interface{}
+ preload []searchPreload
+ offset interface{}
+ limit interface{}
+ group string
+ tableName string
+ raw bool
+ Unscoped bool
+ ignoreOrderQuery bool
+}
+
+type searchPreload struct {
+ schema string
+ conditions []interface{}
+}
+
+func (s *search) clone() *search {
+ clone := search{
+ db: s.db,
+ whereConditions: make([]map[string]interface{}, len(s.whereConditions)),
+ orConditions: make([]map[string]interface{}, len(s.orConditions)),
+ notConditions: make([]map[string]interface{}, len(s.notConditions)),
+ havingConditions: make([]map[string]interface{}, len(s.havingConditions)),
+ joinConditions: make([]map[string]interface{}, len(s.joinConditions)),
+ initAttrs: make([]interface{}, len(s.initAttrs)),
+ assignAttrs: make([]interface{}, len(s.assignAttrs)),
+ selects: s.selects,
+ omits: make([]string, len(s.omits)),
+ orders: make([]interface{}, len(s.orders)),
+ preload: make([]searchPreload, len(s.preload)),
+ offset: s.offset,
+ limit: s.limit,
+ group: s.group,
+ tableName: s.tableName,
+ raw: s.raw,
+ Unscoped: s.Unscoped,
+ ignoreOrderQuery: s.ignoreOrderQuery,
+ }
+ for i, value := range s.whereConditions {
+ clone.whereConditions[i] = value
+ }
+ for i, value := range s.orConditions {
+ clone.orConditions[i] = value
+ }
+ for i, value := range s.notConditions {
+ clone.notConditions[i] = value
+ }
+ for i, value := range s.havingConditions {
+ clone.havingConditions[i] = value
+ }
+ for i, value := range s.joinConditions {
+ clone.joinConditions[i] = value
+ }
+ for i, value := range s.initAttrs {
+ clone.initAttrs[i] = value
+ }
+ for i, value := range s.assignAttrs {
+ clone.assignAttrs[i] = value
+ }
+ for i, value := range s.omits {
+ clone.omits[i] = value
+ }
+ for i, value := range s.orders {
+ clone.orders[i] = value
+ }
+ for i, value := range s.preload {
+ clone.preload[i] = value
+ }
+ return &clone
+}
+
+func (s *search) Where(query interface{}, values ...interface{}) *search {
+ s.whereConditions = append(s.whereConditions, map[string]interface{}{"query": query, "args": values})
+ return s
+}
+
+func (s *search) Not(query interface{}, values ...interface{}) *search {
+ s.notConditions = append(s.notConditions, map[string]interface{}{"query": query, "args": values})
+ return s
+}
+
+func (s *search) Or(query interface{}, values ...interface{}) *search {
+ s.orConditions = append(s.orConditions, map[string]interface{}{"query": query, "args": values})
+ return s
+}
+
+func (s *search) Attrs(attrs ...interface{}) *search {
+ s.initAttrs = append(s.initAttrs, toSearchableMap(attrs...))
+ return s
+}
+
+func (s *search) Assign(attrs ...interface{}) *search {
+ s.assignAttrs = append(s.assignAttrs, toSearchableMap(attrs...))
+ return s
+}
+
+func (s *search) Order(value interface{}, reorder ...bool) *search {
+ if len(reorder) > 0 && reorder[0] {
+ s.orders = []interface{}{}
+ }
+
+ if value != nil && value != "" {
+ s.orders = append(s.orders, value)
+ }
+ return s
+}
+
+func (s *search) Select(query interface{}, args ...interface{}) *search {
+ s.selects = map[string]interface{}{"query": query, "args": args}
+ return s
+}
+
+func (s *search) Omit(columns ...string) *search {
+ s.omits = columns
+ return s
+}
+
+func (s *search) Limit(limit interface{}) *search {
+ s.limit = limit
+ return s
+}
+
+func (s *search) Offset(offset interface{}) *search {
+ s.offset = offset
+ return s
+}
+
+func (s *search) Group(query string) *search {
+ s.group = s.getInterfaceAsSQL(query)
+ return s
+}
+
+func (s *search) Having(query interface{}, values ...interface{}) *search {
+ if val, ok := query.(*SqlExpr); ok {
+ s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": val.expr, "args": val.args})
+ } else {
+ s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": query, "args": values})
+ }
+ return s
+}
+
+func (s *search) Joins(query string, values ...interface{}) *search {
+ s.joinConditions = append(s.joinConditions, map[string]interface{}{"query": query, "args": values})
+ return s
+}
+
+func (s *search) Preload(schema string, values ...interface{}) *search {
+ var preloads []searchPreload
+ for _, preload := range s.preload {
+ if preload.schema != schema {
+ preloads = append(preloads, preload)
+ }
+ }
+ preloads = append(preloads, searchPreload{schema, values})
+ s.preload = preloads
+ return s
+}
+
+func (s *search) Raw(b bool) *search {
+ s.raw = b
+ return s
+}
+
+func (s *search) unscoped() *search {
+ s.Unscoped = true
+ return s
+}
+
+func (s *search) Table(name string) *search {
+ s.tableName = name
+ return s
+}
+
+func (s *search) getInterfaceAsSQL(value interface{}) (str string) {
+ switch value.(type) {
+ case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+ str = fmt.Sprintf("%v", value)
+ default:
+ s.db.AddError(ErrInvalidSQL)
+ }
+
+ if str == "-1" {
+ return ""
+ }
+ return
+}
diff --git a/vendor/github.com/jinzhu/gorm/test_all.sh b/vendor/github.com/jinzhu/gorm/test_all.sh
new file mode 100644
index 0000000..5cfb332
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/test_all.sh
@@ -0,0 +1,5 @@
+dialects=("postgres" "mysql" "mssql" "sqlite")
+
+for dialect in "${dialects[@]}" ; do
+ DEBUG=false GORM_DIALECT=${dialect} go test
+done
diff --git a/vendor/github.com/jinzhu/gorm/utils.go b/vendor/github.com/jinzhu/gorm/utils.go
new file mode 100644
index 0000000..d2ae946
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/utils.go
@@ -0,0 +1,226 @@
+package gorm
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+)
+
+// NowFunc returns the current time. It is exported so that developers can
+// customize it to their needs, e.g.:
+// gorm.NowFunc = func() time.Time {
+// return time.Now().UTC()
+// }
+var NowFunc = func() time.Time {
+ return time.Now()
+}
+
+// Copied from golint
+var commonInitialisms = []string{"API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "LHS", "QPS", "RAM", "RHS", "RPC", "SLA", "SMTP", "SSH", "TLS", "TTL", "UID", "UI", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XSRF", "XSS"}
+var commonInitialismsReplacer *strings.Replacer
+
+var goSrcRegexp = regexp.MustCompile(`jinzhu/gorm(@.*)?/.*.go`)
+var goTestRegexp = regexp.MustCompile(`jinzhu/gorm(@.*)?/.*test.go`)
+
+func init() {
+ var commonInitialismsForReplacer []string
+ for _, initialism := range commonInitialisms {
+ commonInitialismsForReplacer = append(commonInitialismsForReplacer, initialism, strings.Title(strings.ToLower(initialism)))
+ }
+ commonInitialismsReplacer = strings.NewReplacer(commonInitialismsForReplacer...)
+}
+
+type safeMap struct {
+ m map[string]string
+ l *sync.RWMutex
+}
+
+func (s *safeMap) Set(key string, value string) {
+ s.l.Lock()
+ defer s.l.Unlock()
+ s.m[key] = value
+}
+
+func (s *safeMap) Get(key string) string {
+ s.l.RLock()
+ defer s.l.RUnlock()
+ return s.m[key]
+}
+
+func newSafeMap() *safeMap {
+ return &safeMap{l: new(sync.RWMutex), m: make(map[string]string)}
+}
+
+// SQL expression
+type SqlExpr struct {
+ expr string
+ args []interface{}
+}
+
+// Expr generates a raw SQL expression, for example:
+// DB.Model(&product).Update("price", gorm.Expr("price * ? + ?", 2, 100))
+func Expr(expression string, args ...interface{}) *SqlExpr {
+ return &SqlExpr{expr: expression, args: args}
+}
+
+func indirect(reflectValue reflect.Value) reflect.Value {
+ for reflectValue.Kind() == reflect.Ptr {
+ reflectValue = reflectValue.Elem()
+ }
+ return reflectValue
+}
+
+func toQueryMarks(primaryValues [][]interface{}) string {
+ var results []string
+
+ for _, primaryValue := range primaryValues {
+ var marks []string
+ for range primaryValue {
+ marks = append(marks, "?")
+ }
+
+ if len(marks) > 1 {
+ results = append(results, fmt.Sprintf("(%v)", strings.Join(marks, ",")))
+ } else {
+ results = append(results, strings.Join(marks, ""))
+ }
+ }
+ return strings.Join(results, ",")
+}
+
+func toQueryCondition(scope *Scope, columns []string) string {
+ var newColumns []string
+ for _, column := range columns {
+ newColumns = append(newColumns, scope.Quote(column))
+ }
+
+ if len(columns) > 1 {
+ return fmt.Sprintf("(%v)", strings.Join(newColumns, ","))
+ }
+ return strings.Join(newColumns, ",")
+}
+
+func toQueryValues(values [][]interface{}) (results []interface{}) {
+ for _, value := range values {
+ for _, v := range value {
+ results = append(results, v)
+ }
+ }
+ return
+}
+
+func fileWithLineNum() string {
+ for i := 2; i < 15; i++ {
+ _, file, line, ok := runtime.Caller(i)
+ if ok && (!goSrcRegexp.MatchString(file) || goTestRegexp.MatchString(file)) {
+ return fmt.Sprintf("%v:%v", file, line)
+ }
+ }
+ return ""
+}
+
+func isBlank(value reflect.Value) bool {
+ switch value.Kind() {
+ case reflect.String:
+ return value.Len() == 0
+ case reflect.Bool:
+ return !value.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return value.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return value.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return value.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return value.IsNil()
+ }
+
+ return reflect.DeepEqual(value.Interface(), reflect.Zero(value.Type()).Interface())
+}
+
+func toSearchableMap(attrs ...interface{}) (result interface{}) {
+ if len(attrs) > 1 {
+ if str, ok := attrs[0].(string); ok {
+ result = map[string]interface{}{str: attrs[1]}
+ }
+ } else if len(attrs) == 1 {
+ if attr, ok := attrs[0].(map[string]interface{}); ok {
+ result = attr
+ }
+
+ if attr, ok := attrs[0].(interface{}); ok {
+ result = attr
+ }
+ }
+ return
+}
+
+func equalAsString(a interface{}, b interface{}) bool {
+ return toString(a) == toString(b)
+}
+
+func toString(str interface{}) string {
+ if values, ok := str.([]interface{}); ok {
+ var results []string
+ for _, value := range values {
+ results = append(results, toString(value))
+ }
+ return strings.Join(results, "_")
+ } else if bytes, ok := str.([]byte); ok {
+ return string(bytes)
+ } else if reflectValue := reflect.Indirect(reflect.ValueOf(str)); reflectValue.IsValid() {
+ return fmt.Sprintf("%v", reflectValue.Interface())
+ }
+ return ""
+}
+
+func makeSlice(elemType reflect.Type) interface{} {
+ if elemType.Kind() == reflect.Slice {
+ elemType = elemType.Elem()
+ }
+ sliceType := reflect.SliceOf(elemType)
+ slice := reflect.New(sliceType)
+ slice.Elem().Set(reflect.MakeSlice(sliceType, 0, 0))
+ return slice.Interface()
+}
+
+func strInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+// getValueFromFields returns the values of the given fields
+func getValueFromFields(value reflect.Value, fieldNames []string) (results []interface{}) {
+ // If value is a nil pointer, Indirect returns a zero Value!
+ // Therefore we need to check for a zero value,
+ // as FieldByName could panic
+ if indirectValue := reflect.Indirect(value); indirectValue.IsValid() {
+ for _, fieldName := range fieldNames {
+ if fieldValue := reflect.Indirect(indirectValue.FieldByName(fieldName)); fieldValue.IsValid() {
+ result := fieldValue.Interface()
+ if r, ok := result.(driver.Valuer); ok {
+ result, _ = r.Value()
+ }
+ results = append(results, result)
+ }
+ }
+ }
+ return
+}
+
+func addExtraSpaceIfExist(str string) string {
+ if str != "" {
+ return " " + str
+ }
+ return ""
+}
diff --git a/vendor/github.com/jinzhu/gorm/wercker.yml b/vendor/github.com/jinzhu/gorm/wercker.yml
new file mode 100644
index 0000000..1de947b
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/wercker.yml
@@ -0,0 +1,149 @@
+# use the default golang container from Docker Hub
+box: golang
+
+services:
+ - name: mariadb
+ id: mariadb:latest
+ env:
+ MYSQL_DATABASE: gorm
+ MYSQL_USER: gorm
+ MYSQL_PASSWORD: gorm
+ MYSQL_RANDOM_ROOT_PASSWORD: "yes"
+ - name: mysql
+ id: mysql:latest
+ env:
+ MYSQL_DATABASE: gorm
+ MYSQL_USER: gorm
+ MYSQL_PASSWORD: gorm
+ MYSQL_RANDOM_ROOT_PASSWORD: "yes"
+ - name: mysql57
+ id: mysql:5.7
+ env:
+ MYSQL_DATABASE: gorm
+ MYSQL_USER: gorm
+ MYSQL_PASSWORD: gorm
+ MYSQL_RANDOM_ROOT_PASSWORD: "yes"
+ - name: mysql56
+ id: mysql:5.6
+ env:
+ MYSQL_DATABASE: gorm
+ MYSQL_USER: gorm
+ MYSQL_PASSWORD: gorm
+ MYSQL_RANDOM_ROOT_PASSWORD: "yes"
+ - name: postgres
+ id: postgres:latest
+ env:
+ POSTGRES_USER: gorm
+ POSTGRES_PASSWORD: gorm
+ POSTGRES_DB: gorm
+ - name: postgres96
+ id: postgres:9.6
+ env:
+ POSTGRES_USER: gorm
+ POSTGRES_PASSWORD: gorm
+ POSTGRES_DB: gorm
+ - name: postgres95
+ id: postgres:9.5
+ env:
+ POSTGRES_USER: gorm
+ POSTGRES_PASSWORD: gorm
+ POSTGRES_DB: gorm
+ - name: postgres94
+ id: postgres:9.4
+ env:
+ POSTGRES_USER: gorm
+ POSTGRES_PASSWORD: gorm
+ POSTGRES_DB: gorm
+ - name: postgres93
+ id: postgres:9.3
+ env:
+ POSTGRES_USER: gorm
+ POSTGRES_PASSWORD: gorm
+ POSTGRES_DB: gorm
+ - name: mssql
+ id: mcmoe/mssqldocker:latest
+ env:
+ ACCEPT_EULA: Y
+ SA_PASSWORD: LoremIpsum86
+ MSSQL_DB: gorm
+ MSSQL_USER: gorm
+ MSSQL_PASSWORD: LoremIpsum86
+
+# The steps that will be executed in the build pipeline
+build:
+ # The steps that will be executed on build
+ steps:
+ # Sets the go workspace and places your package
+ # at the right place in the workspace tree
+ - setup-go-workspace
+
+ # Gets the dependencies
+ - script:
+ name: go get
+ code: |
+ cd $WERCKER_SOURCE_DIR
+ go version
+ go get -t -v ./...
+
+ # Build the project
+ - script:
+ name: go build
+ code: |
+ go build ./...
+
+ # Test the project
+ - script:
+ name: test sqlite
+ code: |
+ go test -race -v ./...
+
+ - script:
+ name: test mariadb
+ code: |
+ GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mariadb:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
+
+ - script:
+ name: test mysql
+ code: |
+ GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
+
+ - script:
+ name: test mysql5.7
+ code: |
+ GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql57:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
+
+ - script:
+ name: test mysql5.6
+ code: |
+ GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql56:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
+
+ - script:
+ name: test postgres
+ code: |
+ GORM_DIALECT=postgres GORM_DSN="host=postgres user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
+
+ - script:
+ name: test postgres96
+ code: |
+ GORM_DIALECT=postgres GORM_DSN="host=postgres96 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
+
+ - script:
+ name: test postgres95
+ code: |
+ GORM_DIALECT=postgres GORM_DSN="host=postgres95 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
+
+ - script:
+ name: test postgres94
+ code: |
+ GORM_DIALECT=postgres GORM_DSN="host=postgres94 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
+
+ - script:
+ name: test postgres93
+ code: |
+ GORM_DIALECT=postgres GORM_DSN="host=postgres93 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
+
+ - script:
+ name: codecov
+ code: |
+ go test -race -coverprofile=coverage.txt -covermode=atomic ./...
+ bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/jinzhu/inflection/LICENSE b/vendor/github.com/jinzhu/inflection/LICENSE
new file mode 100644
index 0000000..a1ca9a0
--- /dev/null
+++ b/vendor/github.com/jinzhu/inflection/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 - Jinzhu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/jinzhu/inflection/README.md b/vendor/github.com/jinzhu/inflection/README.md
new file mode 100644
index 0000000..a3de336
--- /dev/null
+++ b/vendor/github.com/jinzhu/inflection/README.md
@@ -0,0 +1,55 @@
+# Inflection
+
+Inflection pluralizes and singularizes English nouns
+
+[![wercker status](https://app.wercker.com/status/f8c7432b097d1f4ce636879670be0930/s/master "wercker status")](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930)
+
+## Basic Usage
+
+```go
+inflection.Plural("person") => "people"
+inflection.Plural("Person") => "People"
+inflection.Plural("PERSON") => "PEOPLE"
+inflection.Plural("bus") => "buses"
+inflection.Plural("BUS") => "BUSES"
+inflection.Plural("Bus") => "Buses"
+
+inflection.Singular("people") => "person"
+inflection.Singular("People") => "Person"
+inflection.Singular("PEOPLE") => "PERSON"
+inflection.Singular("buses") => "bus"
+inflection.Singular("BUSES") => "BUS"
+inflection.Singular("Buses") => "Bus"
+
+inflection.Plural("FancyPerson") => "FancyPeople"
+inflection.Singular("FancyPeople") => "FancyPerson"
+```
+
+## Register Rules
+
+Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
+
+If you want to register more rules, use the following:
+
+```
+inflection.AddUncountable("fish")
+inflection.AddIrregular("person", "people")
+inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
+inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
+```
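+
+A compilable sketch of the same registration calls (a minimal, hypothetical example program, not part of the library):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/jinzhu/inflection"
+)
+
+func main() {
+	// Register an extra irregular and an uncountable noun,
+	// then pluralize using the updated rule set.
+	inflection.AddIrregular("person", "people")
+	inflection.AddUncountable("fish")
+
+	fmt.Println(inflection.Plural("person")) // people
+	fmt.Println(inflection.Plural("fish"))   // fish
+}
+```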
+
+## Contributing
+
+You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
+
+## Author
+
+**jinzhu**
+
+*
+*
+*
+
+## License
+
+Released under the [MIT License](http://www.opensource.org/licenses/MIT).
diff --git a/vendor/github.com/jinzhu/inflection/inflections.go b/vendor/github.com/jinzhu/inflection/inflections.go
new file mode 100644
index 0000000..606263b
--- /dev/null
+++ b/vendor/github.com/jinzhu/inflection/inflections.go
@@ -0,0 +1,273 @@
+/*
+Package inflection pluralizes and singularizes English nouns.
+
+ inflection.Plural("person") => "people"
+ inflection.Plural("Person") => "People"
+ inflection.Plural("PERSON") => "PEOPLE"
+
+ inflection.Singular("people") => "person"
+ inflection.Singular("People") => "Person"
+ inflection.Singular("PEOPLE") => "PERSON"
+
+ inflection.Plural("FancyPerson") => "FancyPeople"
+ inflection.Singular("FancyPeople") => "FancyPerson"
+
+Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
+
+If you want to register more rules, use the following:
+
+ inflection.AddUncountable("fish")
+ inflection.AddIrregular("person", "people")
+ inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
+ inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
+*/
+package inflection
+
+import (
+ "regexp"
+ "strings"
+)
+
+type inflection struct {
+ regexp *regexp.Regexp
+ replace string
+}
+
+// Regular is a regexp find replace inflection
+type Regular struct {
+ find string
+ replace string
+}
+
+// Irregular is a hard replace inflection,
+// containing both singular and plural forms
+type Irregular struct {
+ singular string
+ plural string
+}
+
+// RegularSlice is a slice of Regular inflections
+type RegularSlice []Regular
+
+// IrregularSlice is a slice of Irregular inflections
+type IrregularSlice []Irregular
+
+var pluralInflections = RegularSlice{
+ {"([a-z])$", "${1}s"},
+ {"s$", "s"},
+ {"^(ax|test)is$", "${1}es"},
+ {"(octop|vir)us$", "${1}i"},
+ {"(octop|vir)i$", "${1}i"},
+ {"(alias|status)$", "${1}es"},
+ {"(bu)s$", "${1}ses"},
+ {"(buffal|tomat)o$", "${1}oes"},
+ {"([ti])um$", "${1}a"},
+ {"([ti])a$", "${1}a"},
+ {"sis$", "ses"},
+ {"(?:([^f])fe|([lr])f)$", "${1}${2}ves"},
+ {"(hive)$", "${1}s"},
+ {"([^aeiouy]|qu)y$", "${1}ies"},
+ {"(x|ch|ss|sh)$", "${1}es"},
+ {"(matr|vert|ind)(?:ix|ex)$", "${1}ices"},
+ {"^(m|l)ouse$", "${1}ice"},
+ {"^(m|l)ice$", "${1}ice"},
+ {"^(ox)$", "${1}en"},
+ {"^(oxen)$", "${1}"},
+ {"(quiz)$", "${1}zes"},
+}
+
+var singularInflections = RegularSlice{
+ {"s$", ""},
+ {"(ss)$", "${1}"},
+ {"(n)ews$", "${1}ews"},
+ {"([ti])a$", "${1}um"},
+ {"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"},
+ {"(^analy)(sis|ses)$", "${1}sis"},
+ {"([^f])ves$", "${1}fe"},
+ {"(hive)s$", "${1}"},
+ {"(tive)s$", "${1}"},
+ {"([lr])ves$", "${1}f"},
+ {"([^aeiouy]|qu)ies$", "${1}y"},
+ {"(s)eries$", "${1}eries"},
+ {"(m)ovies$", "${1}ovie"},
+ {"(c)ookies$", "${1}ookie"},
+ {"(x|ch|ss|sh)es$", "${1}"},
+ {"^(m|l)ice$", "${1}ouse"},
+ {"(bus)(es)?$", "${1}"},
+ {"(o)es$", "${1}"},
+ {"(shoe)s$", "${1}"},
+ {"(cris|test)(is|es)$", "${1}is"},
+ {"^(a)x[ie]s$", "${1}xis"},
+ {"(octop|vir)(us|i)$", "${1}us"},
+ {"(alias|status)(es)?$", "${1}"},
+ {"^(ox)en", "${1}"},
+ {"(vert|ind)ices$", "${1}ex"},
+ {"(matr)ices$", "${1}ix"},
+ {"(quiz)zes$", "${1}"},
+ {"(database)s$", "${1}"},
+}
+
+var irregularInflections = IrregularSlice{
+ {"person", "people"},
+ {"man", "men"},
+ {"child", "children"},
+ {"sex", "sexes"},
+ {"move", "moves"},
+ {"zombie", "zombies"},
+}
+
+var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"}
+
+var compiledPluralMaps []inflection
+var compiledSingularMaps []inflection
+
+func compile() {
+ compiledPluralMaps = []inflection{}
+ compiledSingularMaps = []inflection{}
+ for _, uncountable := range uncountableInflections {
+ inf := inflection{
+ regexp: regexp.MustCompile("^(?i)(" + uncountable + ")$"),
+ replace: "${1}",
+ }
+ compiledPluralMaps = append(compiledPluralMaps, inf)
+ compiledSingularMaps = append(compiledSingularMaps, inf)
+ }
+
+ for _, value := range irregularInflections {
+ infs := []inflection{
+ inflection{regexp: regexp.MustCompile(strings.ToUpper(value.singular) + "$"), replace: strings.ToUpper(value.plural)},
+ inflection{regexp: regexp.MustCompile(strings.Title(value.singular) + "$"), replace: strings.Title(value.plural)},
+ inflection{regexp: regexp.MustCompile(value.singular + "$"), replace: value.plural},
+ }
+ compiledPluralMaps = append(compiledPluralMaps, infs...)
+ }
+
+ for _, value := range irregularInflections {
+ infs := []inflection{
+ inflection{regexp: regexp.MustCompile(strings.ToUpper(value.plural) + "$"), replace: strings.ToUpper(value.singular)},
+ inflection{regexp: regexp.MustCompile(strings.Title(value.plural) + "$"), replace: strings.Title(value.singular)},
+ inflection{regexp: regexp.MustCompile(value.plural + "$"), replace: value.singular},
+ }
+ compiledSingularMaps = append(compiledSingularMaps, infs...)
+ }
+
+ for i := len(pluralInflections) - 1; i >= 0; i-- {
+ value := pluralInflections[i]
+ infs := []inflection{
+ inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
+ inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
+ inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
+ }
+ compiledPluralMaps = append(compiledPluralMaps, infs...)
+ }
+
+ for i := len(singularInflections) - 1; i >= 0; i-- {
+ value := singularInflections[i]
+ infs := []inflection{
+ inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
+ inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
+ inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
+ }
+ compiledSingularMaps = append(compiledSingularMaps, infs...)
+ }
+}
+
+func init() {
+ compile()
+}
+
+// AddPlural adds a plural inflection
+func AddPlural(find, replace string) {
+ pluralInflections = append(pluralInflections, Regular{find, replace})
+ compile()
+}
+
+// AddSingular adds a singular inflection
+func AddSingular(find, replace string) {
+ singularInflections = append(singularInflections, Regular{find, replace})
+ compile()
+}
+
+// AddIrregular adds an irregular inflection
+func AddIrregular(singular, plural string) {
+ irregularInflections = append(irregularInflections, Irregular{singular, plural})
+ compile()
+}
+
+// AddUncountable adds an uncountable inflection
+func AddUncountable(values ...string) {
+ uncountableInflections = append(uncountableInflections, values...)
+ compile()
+}
+
+// GetPlural retrieves the plural inflection values
+func GetPlural() RegularSlice {
+ plurals := make(RegularSlice, len(pluralInflections))
+ copy(plurals, pluralInflections)
+ return plurals
+}
+
+// GetSingular retrieves the singular inflection values
+func GetSingular() RegularSlice {
+ singulars := make(RegularSlice, len(singularInflections))
+ copy(singulars, singularInflections)
+ return singulars
+}
+
+// GetIrregular retrieves the irregular inflection values
+func GetIrregular() IrregularSlice {
+ irregular := make(IrregularSlice, len(irregularInflections))
+ copy(irregular, irregularInflections)
+ return irregular
+}
+
+// GetUncountable retrieves the uncountable inflection values
+func GetUncountable() []string {
+ uncountables := make([]string, len(uncountableInflections))
+ copy(uncountables, uncountableInflections)
+ return uncountables
+}
+
+// SetPlural sets the plural inflections slice
+func SetPlural(inflections RegularSlice) {
+ pluralInflections = inflections
+ compile()
+}
+
+// SetSingular sets the singular inflections slice
+func SetSingular(inflections RegularSlice) {
+ singularInflections = inflections
+ compile()
+}
+
+// SetIrregular sets the irregular inflections slice
+func SetIrregular(inflections IrregularSlice) {
+ irregularInflections = inflections
+ compile()
+}
+
+// SetUncountable sets the uncountable inflections slice
+func SetUncountable(inflections []string) {
+ uncountableInflections = inflections
+ compile()
+}
+
+// Plural converts a word to its plural form
+func Plural(str string) string {
+ for _, inflection := range compiledPluralMaps {
+ if inflection.regexp.MatchString(str) {
+ return inflection.regexp.ReplaceAllString(str, inflection.replace)
+ }
+ }
+ return str
+}
+
+// Singular converts a word to its singular form
+func Singular(str string) string {
+ for _, inflection := range compiledSingularMaps {
+ if inflection.regexp.MatchString(str) {
+ return inflection.regexp.ReplaceAllString(str, inflection.replace)
+ }
+ }
+ return str
+}
diff --git a/vendor/github.com/jinzhu/inflection/wercker.yml b/vendor/github.com/jinzhu/inflection/wercker.yml
new file mode 100644
index 0000000..5e6ce98
--- /dev/null
+++ b/vendor/github.com/jinzhu/inflection/wercker.yml
@@ -0,0 +1,23 @@
+box: golang
+
+build:
+ steps:
+ - setup-go-workspace
+
+ # Gets the dependencies
+ - script:
+ name: go get
+ code: |
+ go get
+
+ # Build the project
+ - script:
+ name: go build
+ code: |
+ go build ./...
+
+ # Test the project
+ - script:
+ name: go test
+ code: |
+ go test ./...
diff --git a/vendor/github.com/peterhellberg/link/LICENSE b/vendor/github.com/peterhellberg/link/LICENSE
new file mode 100644
index 0000000..c38db83
--- /dev/null
+++ b/vendor/github.com/peterhellberg/link/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2015-2022 Peter Hellberg https://c7.se
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/peterhellberg/link/README.md b/vendor/github.com/peterhellberg/link/README.md
new file mode 100644
index 0000000..0ce2677
--- /dev/null
+++ b/vendor/github.com/peterhellberg/link/README.md
@@ -0,0 +1,82 @@
+# link
+
+[![Build status](https://github.com/peterhellberg/link/actions/workflows/test.yml/badge.svg?branch=master)](https://github.com/peterhellberg/link/actions/workflows/test.yml)
+[![Go Report Card](https://goreportcard.com/badge/github.com/peterhellberg/link)](https://goreportcard.com/report/github.com/peterhellberg/link)
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://pkg.go.dev/github.com/peterhellberg/link)
+[![License MIT](https://img.shields.io/badge/license-MIT-lightgrey.svg?style=flat)](https://github.com/peterhellberg/link#license-mit)
+
+Parses **Link** headers used for pagination, as defined in [RFC 5988](https://tools.ietf.org/html/rfc5988).
+
+This package was originally based on [github.com/swhite24/link](https://github.com/swhite24/link), but **Parse** takes a `string` instead of `*http.Request` in this version.
+It also has the convenience functions **ParseHeader**, **ParseRequest** and **ParseResponse**.
+
+## Installation
+
+ go get -u github.com/peterhellberg/link
+
+## Exported functions
+
+ - [Parse(s string) Group](https://godoc.org/github.com/peterhellberg/link#Parse)
+ - [ParseHeader(h http.Header) Group](https://godoc.org/github.com/peterhellberg/link#ParseHeader)
+ - [ParseRequest(req \*http.Request) Group](https://godoc.org/github.com/peterhellberg/link#ParseRequest)
+ - [ParseResponse(resp \*http.Response) Group](https://godoc.org/github.com/peterhellberg/link#ParseResponse)
+
+## Usage
+
+```go
+package main
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/peterhellberg/link"
+)
+
+func main() {
+ for _, l := range link.Parse(`<https://example.com/?page=2>; rel="next"; foo="bar"`) {
+ fmt.Printf("URI: %q, Rel: %q, Extra: %+v\n", l.URI, l.Rel, l.Extra)
+ // URI: "https://example.com/?page=2", Rel: "next", Extra: map[foo:bar]
+ }
+
+ if resp, err := http.Get("https://api.github.com/search/code?q=Println+user:golang"); err == nil {
+ for _, l := range link.ParseResponse(resp) {
+ fmt.Printf("URI: %q, Rel: %q, Extra: %+v\n", l.URI, l.Rel, l.Extra)
+ // URI: "https://api.github.com/search/code?q=Println+user%3Agolang&page=2", Rel: "next", Extra: map[]
+ // URI: "https://api.github.com/search/code?q=Println+user%3Agolang&page=34", Rel: "last", Extra: map[]
+ }
+ }
+}
+```
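+
+If you already have an `http.Header`, it can be parsed directly with **ParseHeader**.
+A small illustrative sketch (the header value below is made up):
+
+```go
+package main
+
+import (
+    "fmt"
+    "net/http"
+
+    "github.com/peterhellberg/link"
+)
+
+func main() {
+    header := http.Header{}
+    header.Set("Link", `<https://example.com/?page=2>; rel="next"`)
+
+    // Group is keyed by rel, so iteration yields each relation and its link.
+    for rel, l := range link.ParseHeader(header) {
+        fmt.Printf("Rel: %q, URI: %q\n", rel, l.URI)
+    }
+}
+```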
+
+## Not supported
+
+ - Extended notation ([RFC 5987](https://tools.ietf.org/html/rfc5987))
+
+## Alternatives to this package
+
+ - [github.com/tent/http-link-go](https://github.com/tent/http-link-go)
+ - [github.com/swhite24/link](https://github.com/swhite24/link)
+
+## License (MIT)
+
+Copyright (c) 2015-2022 [Peter Hellberg](https://c7.se)
+
+> Permission is hereby granted, free of charge, to any person obtaining
+> a copy of this software and associated documentation files (the
+> "Software"), to deal in the Software without restriction, including
+> without limitation the rights to use, copy, modify, merge, publish,
+> distribute, sublicense, and/or sell copies of the Software, and to
+> permit persons to whom the Software is furnished to do so, subject to
+> the following conditions:
+
+> The above copyright notice and this permission notice shall be
+> included in all copies or substantial portions of the Software.
+
+> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+> EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+> MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+> NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+> LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+> OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+> WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/peterhellberg/link/doc.go b/vendor/github.com/peterhellberg/link/doc.go
new file mode 100644
index 0000000..b35e7a8
--- /dev/null
+++ b/vendor/github.com/peterhellberg/link/doc.go
@@ -0,0 +1,38 @@
+/*
+Package link parses Link headers used for pagination, as defined in RFC 5988
+
+# Installation
+
+Just go get the package:
+
+ go get -u github.com/peterhellberg/link
+
+# Usage
+
+A small usage example
+
+ package main
+
+ import (
+ "fmt"
+ "net/http"
+
+ "github.com/peterhellberg/link"
+ )
+
+ func main() {
+ for _, l := range link.Parse(`<https://example.com/?page=2>; rel="next"; foo="bar"`) {
+ fmt.Printf("URI: %q, Rel: %q, Extra: %+v\n", l.URI, l.Rel, l.Extra)
+ // URI: "https://example.com/?page=2", Rel: "next", Extra: map[foo:bar]
+ }
+
+ if resp, err := http.Get("https://api.github.com/search/code?q=Println+user:golang"); err == nil {
+ for _, l := range link.ParseResponse(resp) {
+ fmt.Printf("URI: %q, Rel: %q, Extra: %+v\n", l.URI, l.Rel, l.Extra)
+ // URI: "https://api.github.com/search/code?q=Println+user%3Agolang&page=2", Rel: "next", Extra: map[]
+ // URI: "https://api.github.com/search/code?q=Println+user%3Agolang&page=34", Rel: "last", Extra: map[]
+ }
+ }
+ }
+*/
+package link
diff --git a/vendor/github.com/peterhellberg/link/link.go b/vendor/github.com/peterhellberg/link/link.go
new file mode 100644
index 0000000..0cdb48e
--- /dev/null
+++ b/vendor/github.com/peterhellberg/link/link.go
@@ -0,0 +1,120 @@
+package link
+
+import (
+ "net/http"
+ "regexp"
+ "strings"
+)
+
+var (
+ commaRegexp = regexp.MustCompile(`,\s{0,}`)
+ valueCommaRegexp = regexp.MustCompile(`([^"]),`)
+ equalRegexp = regexp.MustCompile(` *= *`)
+ keyRegexp = regexp.MustCompile(`[a-z*]+`)
+ linkRegexp = regexp.MustCompile(`\A<(.+)>;(.+)\z`)
+ semiRegexp = regexp.MustCompile(`;\s{0,}`)
+ valRegexp = regexp.MustCompile(`"+([^"]+)"+`)
+)
+
+// Group returned by Parse, contains multiple links indexed by "rel".
+type Group map[string]*Link
+
+// Link contains a Link item with URI, Rel, and other non-URI components in Extra.
+type Link struct {
+ URI string
+ Rel string
+ Extra map[string]string
+}
+
+// String returns the URI.
+func (l *Link) String() string {
+ return l.URI
+}
+
+// ParseRequest parses the provided *http.Request into a Group.
+func ParseRequest(req *http.Request) Group {
+ if req == nil {
+ return nil
+ }
+
+ return ParseHeader(req.Header)
+}
+
+// ParseResponse parses the provided *http.Response into a Group.
+func ParseResponse(resp *http.Response) Group {
+ if resp == nil {
+ return nil
+ }
+
+ return ParseHeader(resp.Header)
+}
+
+// ParseHeader retrieves the Link header from the provided http.Header and parses it into a Group.
+func ParseHeader(h http.Header) Group {
+ if headers, found := h["Link"]; found {
+ return Parse(strings.Join(headers, ", "))
+ }
+
+ return nil
+}
+
+// Parse parses the provided string into a Group.
+func Parse(s string) Group {
+ if s == "" {
+ return nil
+ }
+
+ s = valueCommaRegexp.ReplaceAllString(s, "$1")
+
+ group := Group{}
+
+ for _, l := range commaRegexp.Split(s, -1) {
+ linkMatches := linkRegexp.FindAllStringSubmatch(l, -1)
+
+ if len(linkMatches) == 0 {
+ return nil
+ }
+
+ pieces := linkMatches[0]
+
+ link := &Link{URI: pieces[1], Extra: map[string]string{}}
+
+ for _, extra := range semiRegexp.Split(pieces[2], -1) {
+ vals := equalRegexp.Split(extra, -1)
+
+ if len(vals) != 2 {
+ continue
+ }
+
+ val := strings.TrimSpace(vals[1])
+ key := keyRegexp.FindString(vals[0])
+ vsm := valRegexp.FindStringSubmatch(vals[1])
+
+ if len(vsm) == 2 {
+ val = vsm[1]
+ }
+
+ if key == "rel" {
+ vals := strings.Split(val, " ")
+ rels := []string{vals[0]}
+
+ if len(vals) > 1 {
+ for _, v := range vals[1:] {
+ if !strings.HasPrefix(v, "http") {
+ rels = append(rels, v)
+ }
+ }
+ }
+
+ rel := strings.Join(rels, " ")
+
+ link.Rel = rel
+ group[rel] = link
+ } else {
+ link.Extra[key] = val
+ }
+ }
+ }
+
+ return group
+}
diff --git a/vendor/github.com/slack-go/slack/.gitignore b/vendor/github.com/slack-go/slack/.gitignore
new file mode 100644
index 0000000..ac6f3ee
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/.gitignore
@@ -0,0 +1,3 @@
+*.test
+*~
+.idea/
diff --git a/vendor/github.com/slack-go/slack/.golangci.yml b/vendor/github.com/slack-go/slack/.golangci.yml
new file mode 100644
index 0000000..c16f538
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/.golangci.yml
@@ -0,0 +1,14 @@
+run:
+ timeout: 6m
+ issues-exit-code: 1
+linters:
+ disable-all: true
+ enable:
+ - goimports
+ - govet
+ - interfacer
+ - misspell
+ - structcheck
+ - unconvert
+issues:
+ new: true
diff --git a/vendor/github.com/slack-go/slack/CHANGELOG.md b/vendor/github.com/slack-go/slack/CHANGELOG.md
new file mode 100644
index 0000000..32da687
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/CHANGELOG.md
@@ -0,0 +1,103 @@
+### v0.7.0 - October 2, 2020
+full differences can be viewed using `git log --oneline --decorate --color v0.6.6..v0.7.0`
+Thank you for many contributions!
+
+#### Breaking Changes
+- Add ScheduledMessage type ([#753])
+- Add description field to option block object ([#783])
+- Fix wrong conditional branch ([#782])
+ - The behavior of the user's application may change. (The current behavior is incorrect.)
+
+#### Highlights
+- example: fix to start up a server ([#773])
+- example: Add explanation how the message could be sent in a proper way ([#787])
+- example: fix typo in error log ([#779])
+- refactor: Make GetConversationsParameters.ExcludeArchived optional ([#791])
+- refactor: Unify variables to "config" ([#800])
+- refactor: Rename wrong file name ([#810])
+- feature: Add SetUserRealName for changing a user's realName ([#755])
+- feature: Add response metadata to slack response ([#772])
+- feature: Add response metadata to slack response ([#778])
+- feature: Add select block element conversations filter field ([#790])
+- feature: Add Root field to MessageEvent to support thread_broadcast subtype ([#793])
+- feature: Add bot_profile to messages ([#794])
+- doc: Add logo to README ([#813])
+- doc: Update current project status and Add changelog for v0.7.0 ([#814])
+
+[#753]: https://github.com/slack-go/slack/pull/753
+[#755]: https://github.com/slack-go/slack/pull/755
+[#772]: https://github.com/slack-go/slack/pull/772
+[#773]: https://github.com/slack-go/slack/pull/773
+[#778]: https://github.com/slack-go/slack/pull/778
+[#779]: https://github.com/slack-go/slack/pull/779
+[#782]: https://github.com/slack-go/slack/pull/782
+[#783]: https://github.com/slack-go/slack/pull/783
+[#787]: https://github.com/slack-go/slack/pull/787
+[#790]: https://github.com/slack-go/slack/pull/790
+[#791]: https://github.com/slack-go/slack/pull/791
+[#793]: https://github.com/slack-go/slack/pull/793
+[#794]: https://github.com/slack-go/slack/pull/794
+[#800]: https://github.com/slack-go/slack/pull/800
+[#810]: https://github.com/slack-go/slack/pull/810
+[#813]: https://github.com/slack-go/slack/pull/813
+[#814]: https://github.com/slack-go/slack/pull/814
+
+### v0.6.0 - August 31, 2019
+full differences can be viewed using `git log --oneline --decorate --color v0.5.0..v0.6.0`
+thanks to everyone who has contributed since January!
+
+
+#### Breaking Changes:
+- Info struct has had fields removed related to deprecated functionality by slack.
+- minor adjustments to some structs.
+- some internal default values have changed, usually to be more in line with slack defaults or to correct an inability to set a particular value (Message Parse, for example).
+
+##### Highlights:
+- new slacktest package easy mocking for slack client. use, enjoy, please submit PRs for improvements and default behaviours! shamelessly taken from the [slack-test repo](https://github.com/lusis/slack-test) thank you lusis for letting us use it and bring it into the slack repo.
+- blocks, blocks, blocks.
+- RTM ManagedConnection has undergone a significant cleanup.
+in particular handles backoffs gracefully, removed many deadlocks,
+and Disconnect is now much more responsive.
+
+### v0.5.0 - January 20, 2019
+full differences can be viewed using `git log --oneline --decorate --color v0.4.0..v0.5.0`
+- Breaking changes: various old struct fields have been removed or updated to match slack's api.
+- deadlock fix in RTM disconnect.
+
+### v0.4.0 - October 06, 2018
+full differences can be viewed using `git log --oneline --decorate --color v0.3.0..v0.4.0`
+- Breaking Change: renamed ApplyMessageOption to mark it as unsafe;
+this means it may break without warning in the future.
+- Breaking: Msg structure files field changed to an array.
+- General: implementation for new security headers.
+- RTM: deadlock fix between connect/disconnect.
+- Events: various new fields added.
+- Web: various fixes, new fields exposed, new methods added.
+- Interactions: minor additions expect breaking changes in next release for dialogs/button clicks.
+- Utils: new methods added.
+
+### v0.3.0 - July 30, 2018
+full differences can be viewed using `git log --oneline --decorate --color v0.2.0..v0.3.0`
+- slack events initial support added. (still considered experimental and undergoing changes, stability not promised)
+- vendored dependencies using dep; ensure you are using up-to-date tooling before filing issues.
+- RTM has improved its ability to identify dead connections and reconnect automatically (worth calling out in case it has unintended side effects).
+- bug fixes (various timestamp handling, error handling, RTM locking, etc).
+
+### v0.2.0 - Feb 10, 2018
+
+Release adds a bunch of functionality and improvements, mainly to give people a recent version to vendor against.
+
+Please check [0.2.0](https://github.com/nlopes/slack/releases/tag/v0.2.0)
+
+### v0.1.0 - May 28, 2017
+
+This is released before adding context support.
+As the used context package is the one from Go 1.7 this will be the last
+compatible with Go < 1.7.
+
+Please check [0.1.0](https://github.com/nlopes/slack/releases/tag/v0.1.0)
+
+### v0.0.1 - Jul 26, 2015
+
+If you just updated from master and it broke your implementation, please
+check [0.0.1](https://github.com/nlopes/slack/releases/tag/v0.0.1)
diff --git a/vendor/github.com/slack-go/slack/LICENSE b/vendor/github.com/slack-go/slack/LICENSE
new file mode 100644
index 0000000..5145171
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Norberto Lopes
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/slack-go/slack/Makefile b/vendor/github.com/slack-go/slack/Makefile
new file mode 100644
index 0000000..7279640
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/Makefile
@@ -0,0 +1,36 @@
+.PHONY: help deps fmt lint test test-race test-integration
+
+help:
+ @echo ""
+ @echo "Welcome to slack-go/slack make."
+ @echo "The following commands are available:"
+ @echo ""
+ @echo " make deps : Fetch all dependencies"
+ @echo " make fmt : Run go fmt to fix any formatting issues"
+ @echo " make lint : Use go vet to check for linting issues"
+ @echo " make test : Run all short tests"
+ @echo " make test-race : Run all tests with race condition checking"
+ @echo " make test-integration : Run all tests without limiting to short"
+ @echo ""
+ @echo " make pr-prep : Run this before making a PR to run fmt, lint and tests"
+ @echo ""
+
+deps:
+ @go mod tidy
+
+fmt:
+ @go fmt .
+
+lint:
+ @go vet .
+
+test:
+ @go test -v -count=1 -timeout 300s -short ./...
+
+test-race:
+ @go test -v -count=1 -timeout 300s -short -race ./...
+
+test-integration:
+ @go test -v -count=1 -timeout 600s ./...
+
+pr-prep: fmt lint test-race test-integration
diff --git a/vendor/github.com/slack-go/slack/README.md b/vendor/github.com/slack-go/slack/README.md
new file mode 100644
index 0000000..9618aeb
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/README.md
@@ -0,0 +1,111 @@
+Slack API in Go [![Go Reference](https://pkg.go.dev/badge/github.com/slack-go/slack.svg)](https://pkg.go.dev/github.com/slack-go/slack)
+===============
+
+This is the original Slack library for Go created by Norberto Lopes, transferred to a GitHub organization.
+
+You can also chat with us in the #slack-go and #slack-go-ja channels on the Gophers Slack.
+
+![logo](logo.png "icon")
+
+This library supports most if not all of the `api.slack.com` REST
+calls, as well as the Real-Time Messaging protocol over websocket, in
+a fully managed way.
+
+## Project Status
+There is currently no major version released.
+Therefore, minor version releases may include backward incompatible changes.
+
+See [CHANGELOG.md](https://github.com/slack-go/slack/blob/master/CHANGELOG.md) or [Releases](https://github.com/slack-go/slack/releases) for more information about the changes.
+
+## Installing
+
+### *go get*
+
+ $ go get -u github.com/slack-go/slack
+
+## Example
+
+### Getting all groups
+
+```golang
+import (
+ "fmt"
+
+ "github.com/slack-go/slack"
+)
+
+func main() {
+ api := slack.New("YOUR_TOKEN_HERE")
+ // If you set debugging, it will log all requests to the console
+ // Useful when encountering issues
+ // slack.New("YOUR_TOKEN_HERE", slack.OptionDebug(true))
+ groups, err := api.GetUserGroups(slack.GetUserGroupsOptionIncludeUsers(false))
+ if err != nil {
+ fmt.Printf("%s\n", err)
+ return
+ }
+ for _, group := range groups {
+ fmt.Printf("ID: %s, Name: %s\n", group.ID, group.Name)
+ }
+}
+```
+
+### Getting User Information
+
+```golang
+import (
+ "fmt"
+
+ "github.com/slack-go/slack"
+)
+
+func main() {
+ api := slack.New("YOUR_TOKEN_HERE")
+ user, err := api.GetUserInfo("U023BECGF")
+ if err != nil {
+ fmt.Printf("%s\n", err)
+ return
+ }
+ fmt.Printf("ID: %s, Fullname: %s, Email: %s\n", user.ID, user.Profile.RealName, user.Profile.Email)
+}
+```
+
+## Minimal Socket Mode usage:
+
+See https://github.com/slack-go/slack/blob/master/examples/socketmode/socketmode.go
+
+
+## Minimal RTM usage:
+
+As mentioned in https://api.slack.com/rtm - for most applications, Socket Mode is a better way to communicate with Slack.
+
+See https://github.com/slack-go/slack/blob/master/examples/websocket/websocket.go
+
+
+## Minimal EventsAPI usage:
+
+See https://github.com/slack-go/slack/blob/master/examples/eventsapi/events.go
+
+## Socketmode Event Handler (Experimental)
+
+When using socket mode, dealing with an event can be pretty lengthy as it requires you to route the event to the right place.
+
+Instead, you can use `SocketmodeHandler` much like you use an HTTP handler to register which event you would like to listen to and what callback function will process that event when it occurs.
+
+See [./examples/socketmode_handler/socketmode_handler.go](./examples/socketmode_handler/socketmode_handler.go)
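+
+A minimal sketch of that pattern (illustrative only; the linked example is the complete, authoritative version, and the tokens below are placeholders):
+
+```golang
+import (
+    "github.com/slack-go/slack"
+    "github.com/slack-go/slack/socketmode"
+)
+
+func main() {
+    api := slack.New(
+        "YOUR_BOT_TOKEN_HERE",
+        slack.OptionAppLevelToken("YOUR_APP_TOKEN_HERE"), // Socket Mode requires an app-level token
+    )
+    client := socketmode.New(api)
+    handler := socketmode.NewSocketmodeHandler(client)
+
+    // Register a callback per event type instead of routing every event by hand.
+    handler.Handle(socketmode.EventTypeEventsAPI, func(evt *socketmode.Event, c *socketmode.Client) {
+        c.Ack(*evt.Request) // acknowledge receipt so Slack does not retry
+        // process evt.Data here
+    })
+
+    handler.RunEventLoop()
+}
+```
+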
+## Contributing
+
+You are more than welcome to contribute to this project. Fork and
+make a Pull Request, or create an Issue if you see any problem.
+
+Before making any Pull Request please run the following:
+
+```
+make pr-prep
+```
+
+This will check/update code formatting and linting, and then run all tests.
+
+## License
+
+BSD 2 Clause license
diff --git a/vendor/github.com/slack-go/slack/TODO.txt b/vendor/github.com/slack-go/slack/TODO.txt
new file mode 100644
index 0000000..8607960
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/TODO.txt
@@ -0,0 +1,3 @@
+- Add more tests!!!
+- Add support to have markdown hints
+ - See section Message Formatting at https://api.slack.com/docs/formatting
diff --git a/vendor/github.com/slack-go/slack/admin.go b/vendor/github.com/slack-go/slack/admin.go
new file mode 100644
index 0000000..d51426b
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/admin.go
@@ -0,0 +1,207 @@
+package slack
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+func (api *Client) adminRequest(ctx context.Context, method string, teamName string, values url.Values) error {
+ resp := &SlackResponse{}
+ err := parseAdminResponse(ctx, api.httpclient, method, teamName, values, resp, api)
+ if err != nil {
+ return err
+ }
+
+ return resp.Err()
+}
+
+// DisableUser disables a user account, given a user ID
+func (api *Client) DisableUser(teamName string, uid string) error {
+ return api.DisableUserContext(context.Background(), teamName, uid)
+}
+
+// DisableUserContext disables a user account, given a user ID, with a custom context
+func (api *Client) DisableUserContext(ctx context.Context, teamName string, uid string) error {
+ values := url.Values{
+ "user": {uid},
+ "token": {api.token},
+ "set_active": {"true"},
+ "_attempts": {"1"},
+ }
+
+ if err := api.adminRequest(ctx, "setInactive", teamName, values); err != nil {
+ return fmt.Errorf("failed to disable user with id '%s': %s", uid, err)
+ }
+
+ return nil
+}
+
+// InviteGuest invites a user to Slack as a single-channel guest
+func (api *Client) InviteGuest(teamName, channel, firstName, lastName, emailAddress string) error {
+ return api.InviteGuestContext(context.Background(), teamName, channel, firstName, lastName, emailAddress)
+}
+
+// InviteGuestContext invites a user to Slack as a single-channel guest with a custom context
+func (api *Client) InviteGuestContext(ctx context.Context, teamName, channel, firstName, lastName, emailAddress string) error {
+ values := url.Values{
+ "email": {emailAddress},
+ "channels": {channel},
+ "first_name": {firstName},
+ "last_name": {lastName},
+ "ultra_restricted": {"1"},
+ "token": {api.token},
+ "resend": {"true"},
+ "set_active": {"true"},
+ "_attempts": {"1"},
+ }
+
+ err := api.adminRequest(ctx, "invite", teamName, values)
+ if err != nil {
+ return fmt.Errorf("Failed to invite single-channel guest: %s", err)
+ }
+
+ return nil
+}
+
+// InviteRestricted invites a user to Slack as a restricted account
+func (api *Client) InviteRestricted(teamName, channel, firstName, lastName, emailAddress string) error {
+ return api.InviteRestrictedContext(context.Background(), teamName, channel, firstName, lastName, emailAddress)
+}
+
+// InviteRestrictedContext invites a user to Slack as a restricted account with a custom context
+func (api *Client) InviteRestrictedContext(ctx context.Context, teamName, channel, firstName, lastName, emailAddress string) error {
+ values := url.Values{
+ "email": {emailAddress},
+ "channels": {channel},
+ "first_name": {firstName},
+ "last_name": {lastName},
+ "restricted": {"1"},
+ "token": {api.token},
+ "resend": {"true"},
+ "set_active": {"true"},
+ "_attempts": {"1"},
+ }
+
+ err := api.adminRequest(ctx, "invite", teamName, values)
+ if err != nil {
+ return fmt.Errorf("Failed to invite restricted account: %s", err)
+ }
+
+ return nil
+}
+
+// InviteToTeam invites a user to a Slack team
+func (api *Client) InviteToTeam(teamName, firstName, lastName, emailAddress string) error {
+ return api.InviteToTeamContext(context.Background(), teamName, firstName, lastName, emailAddress)
+}
+
+// InviteToTeamContext invites a user to a Slack team with a custom context
+func (api *Client) InviteToTeamContext(ctx context.Context, teamName, firstName, lastName, emailAddress string) error {
+ values := url.Values{
+ "email": {emailAddress},
+ "first_name": {firstName},
+ "last_name": {lastName},
+ "token": {api.token},
+ "set_active": {"true"},
+ "_attempts": {"1"},
+ }
+
+ err := api.adminRequest(ctx, "invite", teamName, values)
+ if err != nil {
+ return fmt.Errorf("Failed to invite to team: %s", err)
+ }
+
+ return nil
+}
+
+// SetRegular enables the specified user
+func (api *Client) SetRegular(teamName, user string) error {
+ return api.SetRegularContext(context.Background(), teamName, user)
+}
+
+// SetRegularContext enables the specified user with a custom context
+func (api *Client) SetRegularContext(ctx context.Context, teamName, user string) error {
+ values := url.Values{
+ "user": {user},
+ "token": {api.token},
+ "set_active": {"true"},
+ "_attempts": {"1"},
+ }
+
+ err := api.adminRequest(ctx, "setRegular", teamName, values)
+ if err != nil {
+ return fmt.Errorf("Failed to change the user (%s) to a regular user: %s", user, err)
+ }
+
+ return nil
+}
+
+// SendSSOBindingEmail sends an SSO binding email to the specified user
+func (api *Client) SendSSOBindingEmail(teamName, user string) error {
+ return api.SendSSOBindingEmailContext(context.Background(), teamName, user)
+}
+
+// SendSSOBindingEmailContext sends an SSO binding email to the specified user with a custom context
+func (api *Client) SendSSOBindingEmailContext(ctx context.Context, teamName, user string) error {
+ values := url.Values{
+ "user": {user},
+ "token": {api.token},
+ "set_active": {"true"},
+ "_attempts": {"1"},
+ }
+
+ err := api.adminRequest(ctx, "sendSSOBind", teamName, values)
+ if err != nil {
+ return fmt.Errorf("Failed to send SSO binding email for user (%s): %s", user, err)
+ }
+
+ return nil
+}
+
+// SetUltraRestricted converts a user into a single-channel guest
+func (api *Client) SetUltraRestricted(teamName, uid, channel string) error {
+ return api.SetUltraRestrictedContext(context.Background(), teamName, uid, channel)
+}
+
+// SetUltraRestrictedContext converts a user into a single-channel guest with a custom context
+func (api *Client) SetUltraRestrictedContext(ctx context.Context, teamName, uid, channel string) error {
+ values := url.Values{
+ "user": {uid},
+ "channel": {channel},
+ "token": {api.token},
+ "set_active": {"true"},
+ "_attempts": {"1"},
+ }
+
+ err := api.adminRequest(ctx, "setUltraRestricted", teamName, values)
+ if err != nil {
+ return fmt.Errorf("Failed to ultra-restrict account: %s", err)
+ }
+
+ return nil
+}
+
+// SetRestricted converts a user into a restricted account
+func (api *Client) SetRestricted(teamName, uid string, channelIds ...string) error {
+ return api.SetRestrictedContext(context.Background(), teamName, uid, channelIds...)
+}
+
+// SetRestrictedContext converts a user into a restricted account with a custom context
+func (api *Client) SetRestrictedContext(ctx context.Context, teamName, uid string, channelIds ...string) error {
+ values := url.Values{
+ "user": {uid},
+ "token": {api.token},
+ "set_active": {"true"},
+ "_attempts": {"1"},
+ "channels": {strings.Join(channelIds, ",")},
+ }
+
+ err := api.adminRequest(ctx, "setRestricted", teamName, values)
+ if err != nil {
+ return fmt.Errorf("failed to restrict account: %s", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/slack-go/slack/apps.go b/vendor/github.com/slack-go/slack/apps.go
new file mode 100644
index 0000000..10d4297
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/apps.go
@@ -0,0 +1,64 @@
+package slack
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+)
+
+type listEventAuthorizationsResponse struct {
+ SlackResponse
+ Authorizations []EventAuthorization `json:"authorizations"`
+}
+
+type EventAuthorization struct {
+ EnterpriseID string `json:"enterprise_id"`
+ TeamID string `json:"team_id"`
+ UserID string `json:"user_id"`
+ IsBot bool `json:"is_bot"`
+ IsEnterpriseInstall bool `json:"is_enterprise_install"`
+}
+
+func (api *Client) ListEventAuthorizations(eventContext string) ([]EventAuthorization, error) {
+ return api.ListEventAuthorizationsContext(context.Background(), eventContext)
+}
+
+// ListEventAuthorizationsContext lists authed users and teams for the given event_context. You must provide an app-level token to the client using OptionAppLevelToken. More info: https://api.slack.com/methods/apps.event.authorizations.list
+func (api *Client) ListEventAuthorizationsContext(ctx context.Context, eventContext string) ([]EventAuthorization, error) {
+ resp := &listEventAuthorizationsResponse{}
+
+ request, _ := json.Marshal(map[string]string{
+ "event_context": eventContext,
+ })
+
+ err := postJSON(ctx, api.httpclient, api.endpoint+"apps.event.authorizations.list", api.appLevelToken, request, &resp, api)
+
+ if err != nil {
+ return nil, err
+ }
+ if !resp.Ok {
+ return nil, resp.Err()
+ }
+
+ return resp.Authorizations, nil
+}
+
+func (api *Client) UninstallApp(clientID, clientSecret string) error {
+ return api.UninstallAppContext(context.Background(), clientID, clientSecret)
+}
+
+func (api *Client) UninstallAppContext(ctx context.Context, clientID, clientSecret string) error {
+ values := url.Values{
+ "client_id": {clientID},
+ "client_secret": {clientSecret},
+ }
+
+ response := SlackResponse{}
+
+ err := api.getMethod(ctx, "apps.uninstall", api.token, values, &response)
+ if err != nil {
+ return err
+ }
+
+ return response.Err()
+}
diff --git a/vendor/github.com/slack-go/slack/attachments.go b/vendor/github.com/slack-go/slack/attachments.go
new file mode 100644
index 0000000..f4eb9b9
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/attachments.go
@@ -0,0 +1,98 @@
+package slack
+
+import "encoding/json"
+
+// AttachmentField contains information for an attachment field
+// An Attachment can contain multiple of these
+type AttachmentField struct {
+ Title string `json:"title"`
+ Value string `json:"value"`
+ Short bool `json:"short"`
+}
+
+// AttachmentAction is a button or menu to be included in the attachment. Required when
+// using message buttons or menus and otherwise not useful. A maximum of 5 actions may be
+// provided per attachment.
+type AttachmentAction struct {
+ Name string `json:"name"` // Required.
+ Text string `json:"text"` // Required.
+ Style string `json:"style,omitempty"` // Optional. Allowed values: "default", "primary", "danger".
+ Type ActionType `json:"type"` // Required. Must be set to "button" or "select".
+ Value string `json:"value,omitempty"` // Optional.
+ DataSource string `json:"data_source,omitempty"` // Optional.
+ MinQueryLength int `json:"min_query_length,omitempty"` // Optional. Default value is 1.
+ Options []AttachmentActionOption `json:"options,omitempty"` // Optional. Maximum of 100 options can be provided in each menu.
+ SelectedOptions []AttachmentActionOption `json:"selected_options,omitempty"` // Optional. The first element of this array will be set as the pre-selected option for this menu.
+ OptionGroups []AttachmentActionOptionGroup `json:"option_groups,omitempty"` // Optional.
+ Confirm *ConfirmationField `json:"confirm,omitempty"` // Optional.
+ URL string `json:"url,omitempty"` // Optional.
+}
+
+// actionType returns the type of the action
+func (a AttachmentAction) actionType() ActionType {
+ return a.Type
+}
+
+// AttachmentActionOption the individual option to appear in action menu.
+type AttachmentActionOption struct {
+ Text string `json:"text"` // Required.
+ Value string `json:"value"` // Required.
+ Description string `json:"description,omitempty"` // Optional. Up to 30 characters.
+}
+
+// AttachmentActionOptionGroup is a semi-hierarchal way to list available options to appear in action menu.
+type AttachmentActionOptionGroup struct {
+ Text string `json:"text"` // Required.
+ Options []AttachmentActionOption `json:"options"` // Required.
+}
+
+// AttachmentActionCallback is sent from Slack when a user clicks a button in an interactive message (aka AttachmentAction)
+// DEPRECATED: use InteractionCallback
+type AttachmentActionCallback InteractionCallback
+
+// ConfirmationField are used to ask users to confirm actions
+type ConfirmationField struct {
+ Title string `json:"title,omitempty"` // Optional.
+ Text string `json:"text"` // Required.
+ OkText string `json:"ok_text,omitempty"` // Optional. Defaults to "Okay"
+ DismissText string `json:"dismiss_text,omitempty"` // Optional. Defaults to "Cancel"
+}
+
+// Attachment contains all the information for an attachment
+type Attachment struct {
+ Color string `json:"color,omitempty"`
+ Fallback string `json:"fallback,omitempty"`
+
+ CallbackID string `json:"callback_id,omitempty"`
+ ID int `json:"id,omitempty"`
+
+ AuthorID string `json:"author_id,omitempty"`
+ AuthorName string `json:"author_name,omitempty"`
+ AuthorSubname string `json:"author_subname,omitempty"`
+ AuthorLink string `json:"author_link,omitempty"`
+ AuthorIcon string `json:"author_icon,omitempty"`
+
+ Title string `json:"title,omitempty"`
+ TitleLink string `json:"title_link,omitempty"`
+ Pretext string `json:"pretext,omitempty"`
+ Text string `json:"text,omitempty"`
+
+ ImageURL string `json:"image_url,omitempty"`
+ ThumbURL string `json:"thumb_url,omitempty"`
+
+ ServiceName string `json:"service_name,omitempty"`
+ ServiceIcon string `json:"service_icon,omitempty"`
+ FromURL string `json:"from_url,omitempty"`
+ OriginalURL string `json:"original_url,omitempty"`
+
+ Fields []AttachmentField `json:"fields,omitempty"`
+ Actions []AttachmentAction `json:"actions,omitempty"`
+ MarkdownIn []string `json:"mrkdwn_in,omitempty"`
+
+ Blocks Blocks `json:"blocks,omitempty"`
+
+ Footer string `json:"footer,omitempty"`
+ FooterIcon string `json:"footer_icon,omitempty"`
+
+ Ts json.Number `json:"ts,omitempty"`
+}
diff --git a/vendor/github.com/slack-go/slack/audit.go b/vendor/github.com/slack-go/slack/audit.go
new file mode 100644
index 0000000..a3ea7eb
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/audit.go
@@ -0,0 +1,152 @@
+package slack
+
+import (
+ "context"
+ "net/url"
+ "strconv"
+)
+
+type AuditLogResponse struct {
+ Entries []AuditEntry `json:"entries"`
+ SlackResponse
+}
+
+type AuditEntry struct {
+ ID string `json:"id"`
+ DateCreate int `json:"date_create"`
+ Action string `json:"action"`
+ Actor struct {
+ Type string `json:"type"`
+ User AuditUser `json:"user"`
+ } `json:"actor"`
+ Entity struct {
+ Type string `json:"type"`
+ // Only one of the below will be populated, based on the value of Type: a user, a channel, a file, an app, a workspace, or an enterprise
+ User AuditUser `json:"user"`
+ Channel AuditChannel `json:"channel"`
+ File AuditFile `json:"file"`
+ App AuditApp `json:"app"`
+ Workspace AuditWorkspace `json:"workspace"`
+ Enterprise AuditEnterprise `json:"enterprise"`
+ } `json:"entity"`
+ Context struct {
+ Location struct {
+ Type string `json:"type"`
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Domain string `json:"domain"`
+ } `json:"location"`
+ UA string `json:"ua"`
+ IPAddress string `json:"ip_address"`
+ } `json:"context"`
+ Details struct {
+ NewValue interface{} `json:"new_value"`
+ PreviousValue interface{} `json:"previous_value"`
+ MobileOnly bool `json:"mobile_only"`
+ WebOnly bool `json:"web_only"`
+ NonSSOOnly bool `json:"non_sso_only"`
+ ExportType string `json:"export_type"`
+ ExportStart string `json:"export_start_ts"`
+ ExportEnd string `json:"export_end_ts"`
+ } `json:"details"`
+}
+
+type AuditUser struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Email string `json:"email"`
+ Team string `json:"team"`
+}
+
+type AuditChannel struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Privacy string `json:"privacy"`
+ IsShared bool `json:"is_shared"`
+ IsOrgShared bool `json:"is_org_shared"`
+}
+
+type AuditFile struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Filetype string `json:"filetype"`
+ Title string `json:"title"`
+}
+
+type AuditApp struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ IsDistributed bool `json:"is_distributed"`
+ IsDirectoryApproved bool `json:"is_directory_approved"`
+ IsWorkflowApp bool `json:"is_workflow_app"`
+ Scopes []string `json:"scopes"`
+}
+
+type AuditWorkspace struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Domain string `json:"domain"`
+}
+
+type AuditEnterprise struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Domain string `json:"domain"`
+}
+
+// AuditLogParameters contains all the parameters necessary (including the optional ones) for a GetAuditLogs() request
+type AuditLogParameters struct {
+ Limit int
+ Cursor string
+ Latest int
+ Oldest int
+ Action string
+ Actor string
+ Entity string
+}
+
+func (api *Client) auditLogsRequest(ctx context.Context, path string, values url.Values) (*AuditLogResponse, error) {
+ response := &AuditLogResponse{}
+ err := api.getMethod(ctx, path, api.token, values, response)
+ if err != nil {
+ return nil, err
+ }
+ return response, response.Err()
+}
+
+// GetAuditLogs retrieves a page of audit entries according to the parameters given
+func (api *Client) GetAuditLogs(params AuditLogParameters) (entries []AuditEntry, nextCursor string, err error) {
+ return api.GetAuditLogsContext(context.Background(), params)
+}
+
+// GetAuditLogsContext retrieves a page of audit entries according to the parameters given with a custom context
+func (api *Client) GetAuditLogsContext(ctx context.Context, params AuditLogParameters) (entries []AuditEntry, nextCursor string, err error) {
+ values := url.Values{}
+ if params.Limit != 0 {
+ values.Add("limit", strconv.Itoa(params.Limit))
+ }
+ if params.Oldest != 0 {
+ values.Add("oldest", strconv.Itoa(params.Oldest))
+ }
+ if params.Latest != 0 {
+ values.Add("latest", strconv.Itoa(params.Latest))
+ }
+ if params.Cursor != "" {
+ values.Add("cursor", params.Cursor)
+ }
+ if params.Action != "" {
+ values.Add("action", params.Action)
+ }
+ if params.Actor != "" {
+ values.Add("actor", params.Actor)
+ }
+ if params.Entity != "" {
+ values.Add("entity", params.Entity)
+ }
+
+ response, err := api.auditLogsRequest(ctx, "audit/v1/logs", values)
+ if err != nil {
+ return nil, "", err
+ }
+ return response.Entries, response.ResponseMetadata.Cursor, response.Err()
+}
diff --git a/vendor/github.com/slack-go/slack/auth.go b/vendor/github.com/slack-go/slack/auth.go
new file mode 100644
index 0000000..bf6e80d
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/auth.go
@@ -0,0 +1,74 @@
+package slack
+
+import (
+ "context"
+ "net/url"
+)
+
+// AuthRevokeResponse contains our Auth response from the auth.revoke endpoint
+type AuthRevokeResponse struct {
+ SlackResponse // Contains the "ok", and "Error", if any
+ Revoked bool `json:"revoked,omitempty"`
+}
+
+// authRequest sends the actual request, and unmarshals the response
+func (api *Client) authRequest(ctx context.Context, path string, values url.Values) (*AuthRevokeResponse, error) {
+ response := &AuthRevokeResponse{}
+ err := api.postMethod(ctx, path, values, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, response.Err()
+}
+
+// SendAuthRevoke will send a revocation for our token
+func (api *Client) SendAuthRevoke(token string) (*AuthRevokeResponse, error) {
+ return api.SendAuthRevokeContext(context.Background(), token)
+}
+
+// SendAuthRevokeContext will send a revocation request for our token to auth.revoke with a custom context
+func (api *Client) SendAuthRevokeContext(ctx context.Context, token string) (*AuthRevokeResponse, error) {
+ if token == "" {
+ token = api.token
+ }
+ values := url.Values{
+ "token": {token},
+ }
+
+ return api.authRequest(ctx, "auth.revoke", values)
+}
+
+type listTeamsResponse struct {
+ Teams []Team `json:"teams"`
+ SlackResponse
+}
+
+type ListTeamsParameters struct {
+ Limit int
+ Cursor string
+}
+
+// ListTeams returns all workspaces a token can access.
+// More info: https://api.slack.com/methods/admin.teams.list
+func (api *Client) ListTeams(params ListTeamsParameters) ([]Team, string, error) {
+ return api.ListTeamsContext(context.Background(), params)
+}
+
+// ListTeamsContext returns all workspaces a token can access with a custom context.
+func (api *Client) ListTeamsContext(ctx context.Context, params ListTeamsParameters) ([]Team, string, error) {
+ values := url.Values{
+ "token": {api.token},
+ }
+ if params.Cursor != "" {
+ values.Add("cursor", params.Cursor)
+ }
+
+ response := &listTeamsResponse{}
+ err := api.postMethod(ctx, "auth.teams.list", values, response)
+ if err != nil {
+ return nil, "", err
+ }
+
+ return response.Teams, response.ResponseMetadata.Cursor, response.Err()
+}
diff --git a/vendor/github.com/slack-go/slack/block.go b/vendor/github.com/slack-go/slack/block.go
new file mode 100644
index 0000000..18222bd
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block.go
@@ -0,0 +1,82 @@
+package slack
+
+// @NOTE: Blocks are in beta and subject to change.
+
+// More Information: https://api.slack.com/block-kit
+
+// MessageBlockType defines a named string type to define each block type
+// as a constant for use within the package.
+type MessageBlockType string
+
+const (
+ MBTSection MessageBlockType = "section"
+ MBTDivider MessageBlockType = "divider"
+ MBTImage MessageBlockType = "image"
+ MBTAction MessageBlockType = "actions"
+ MBTContext MessageBlockType = "context"
+ MBTFile MessageBlockType = "file"
+ MBTInput MessageBlockType = "input"
+ MBTHeader MessageBlockType = "header"
+ MBTRichText MessageBlockType = "rich_text"
+)
+
+// Block defines an interface all block types should implement
+// to ensure consistency between blocks.
+type Block interface {
+ BlockType() MessageBlockType
+}
+
+// Blocks is a convenience struct defined to allow dynamic unmarshalling of
+// the "blocks" value in Slack's JSON response, which varies depending on block type
+type Blocks struct {
+ BlockSet []Block `json:"blocks,omitempty"`
+}
+
+// BlockAction is the action callback sent when a block is interacted with
+type BlockAction struct {
+ ActionID string `json:"action_id"`
+ BlockID string `json:"block_id"`
+ Type ActionType `json:"type"`
+ Text TextBlockObject `json:"text"`
+ Value string `json:"value"`
+ ActionTs string `json:"action_ts"`
+ SelectedOption OptionBlockObject `json:"selected_option"`
+ SelectedOptions []OptionBlockObject `json:"selected_options"`
+ SelectedUser string `json:"selected_user"`
+ SelectedUsers []string `json:"selected_users"`
+ SelectedChannel string `json:"selected_channel"`
+ SelectedChannels []string `json:"selected_channels"`
+ SelectedConversation string `json:"selected_conversation"`
+ SelectedConversations []string `json:"selected_conversations"`
+ SelectedDate string `json:"selected_date"`
+ SelectedTime string `json:"selected_time"`
+ SelectedDateTime int64 `json:"selected_date_time"`
+ InitialOption OptionBlockObject `json:"initial_option"`
+ InitialUser string `json:"initial_user"`
+ InitialChannel string `json:"initial_channel"`
+ InitialConversation string `json:"initial_conversation"`
+ InitialDate string `json:"initial_date"`
+ InitialTime string `json:"initial_time"`
+}
+
+// actionType returns the type of the action
+func (b BlockAction) actionType() ActionType {
+ return b.Type
+}
+
+// NewBlockMessage creates a new Message that contains one or more blocks to be displayed
+func NewBlockMessage(blocks ...Block) Message {
+ return Message{
+ Msg: Msg{
+ Blocks: Blocks{
+ BlockSet: blocks,
+ },
+ },
+ }
+}
+
+// AddBlockMessage appends a block to the end of the existing list of blocks
+func AddBlockMessage(message Message, newBlk Block) Message {
+ message.Msg.Blocks.BlockSet = append(message.Msg.Blocks.BlockSet, newBlk)
+ return message
+}
diff --git a/vendor/github.com/slack-go/slack/block_action.go b/vendor/github.com/slack-go/slack/block_action.go
new file mode 100644
index 0000000..c15e4a3
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_action.go
@@ -0,0 +1,26 @@
+package slack
+
+// ActionBlock defines data that is used to hold interactive elements.
+//
+// More Information: https://api.slack.com/reference/messaging/blocks#actions
+type ActionBlock struct {
+ Type MessageBlockType `json:"type"`
+ BlockID string `json:"block_id,omitempty"`
+ Elements *BlockElements `json:"elements"`
+}
+
+// BlockType returns the type of the block
+func (s ActionBlock) BlockType() MessageBlockType {
+ return s.Type
+}
+
+// NewActionBlock returns a new instance of an Action Block
+func NewActionBlock(blockID string, elements ...BlockElement) *ActionBlock {
+ return &ActionBlock{
+ Type: MBTAction,
+ BlockID: blockID,
+ Elements: &BlockElements{
+ ElementSet: elements,
+ },
+ }
+}
diff --git a/vendor/github.com/slack-go/slack/block_context.go b/vendor/github.com/slack-go/slack/block_context.go
new file mode 100644
index 0000000..384fee2
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_context.go
@@ -0,0 +1,32 @@
+package slack
+
+// ContextBlock defines data that is used to display message context, which can
+// include both images and text.
+//
+// More Information: https://api.slack.com/reference/messaging/blocks#context
+type ContextBlock struct {
+ Type MessageBlockType `json:"type"`
+ BlockID string `json:"block_id,omitempty"`
+ ContextElements ContextElements `json:"elements"`
+}
+
+// BlockType returns the type of the block
+func (s ContextBlock) BlockType() MessageBlockType {
+ return s.Type
+}
+
+type ContextElements struct {
+ Elements []MixedElement
+}
+
+// NewContextBlock returns a new instance of a context block
+func NewContextBlock(blockID string, mixedElements ...MixedElement) *ContextBlock {
+ elements := ContextElements{
+ Elements: mixedElements,
+ }
+ return &ContextBlock{
+ Type: MBTContext,
+ BlockID: blockID,
+ ContextElements: elements,
+ }
+}
diff --git a/vendor/github.com/slack-go/slack/block_conv.go b/vendor/github.com/slack-go/slack/block_conv.go
new file mode 100644
index 0000000..4ab58de
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_conv.go
@@ -0,0 +1,437 @@
+package slack
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+)
+
+type sumtype struct {
+ TypeVal string `json:"type"`
+}
+
+// MarshalJSON implements the Marshaller interface for Blocks so that any JSON
+// marshalling is delegated and proper type determination can be made before marshal
+func (b Blocks) MarshalJSON() ([]byte, error) {
+ bytes, err := json.Marshal(b.BlockSet)
+ if err != nil {
+ return nil, err
+ }
+
+ return bytes, nil
+}
+
+// UnmarshalJSON implements the Unmarshaller interface for Blocks, so that any JSON
+// unmarshalling is delegated and proper type determination can be made before unmarshal
+func (b *Blocks) UnmarshalJSON(data []byte) error {
+ var raw []json.RawMessage
+
+ if string(data) == "{}" {
+ return nil
+ }
+
+ err := json.Unmarshal(data, &raw)
+ if err != nil {
+ return err
+ }
+
+ var blocks Blocks
+ for _, r := range raw {
+ s := sumtype{}
+ err := json.Unmarshal(r, &s)
+ if err != nil {
+ return err
+ }
+
+ var blockType string
+ if s.TypeVal != "" {
+ blockType = s.TypeVal
+ }
+
+ var block Block
+ switch blockType {
+ case "actions":
+ block = &ActionBlock{}
+ case "context":
+ block = &ContextBlock{}
+ case "divider":
+ block = &DividerBlock{}
+ case "file":
+ block = &FileBlock{}
+ case "header":
+ block = &HeaderBlock{}
+ case "image":
+ block = &ImageBlock{}
+ case "input":
+ block = &InputBlock{}
+ case "rich_text":
+ block = &RichTextBlock{}
+ case "section":
+ block = &SectionBlock{}
+ default:
+ block = &UnknownBlock{}
+ }
+
+ err = json.Unmarshal(r, block)
+ if err != nil {
+ return err
+ }
+
+ blocks.BlockSet = append(blocks.BlockSet, block)
+ }
+
+ *b = blocks
+ return nil
+}
+
+// UnmarshalJSON implements the Unmarshaller interface for InputBlock, so that any JSON
+// unmarshalling is delegated and proper type determination can be made before unmarshal
+func (b *InputBlock) UnmarshalJSON(data []byte) error {
+ type alias InputBlock
+ a := struct {
+ Element json.RawMessage `json:"element"`
+ *alias
+ }{
+ alias: (*alias)(b),
+ }
+
+ if err := json.Unmarshal(data, &a); err != nil {
+ return err
+ }
+
+ s := sumtype{}
+ if err := json.Unmarshal(a.Element, &s); err != nil {
+ return nil
+ }
+
+ var e BlockElement
+ switch s.TypeVal {
+ case "datepicker":
+ e = &DatePickerBlockElement{}
+ case "timepicker":
+ e = &TimePickerBlockElement{}
+ case "datetimepicker":
+ e = &DateTimePickerBlockElement{}
+ case "plain_text_input":
+ e = &PlainTextInputBlockElement{}
+ case "email_text_input":
+ e = &EmailTextInputBlockElement{}
+ case "url_text_input":
+ e = &URLTextInputBlockElement{}
+ case "static_select", "external_select", "users_select", "conversations_select", "channels_select":
+ e = &SelectBlockElement{}
+ case "multi_static_select", "multi_external_select", "multi_users_select", "multi_conversations_select", "multi_channels_select":
+ e = &MultiSelectBlockElement{}
+ case "checkboxes":
+ e = &CheckboxGroupsBlockElement{}
+ case "overflow":
+ e = &OverflowBlockElement{}
+ case "radio_buttons":
+ e = &RadioButtonsBlockElement{}
+ case "number_input":
+ e = &NumberInputBlockElement{}
+ default:
+ return errors.New("unsupported block element type")
+ }
+
+ if err := json.Unmarshal(a.Element, e); err != nil {
+ return err
+ }
+ b.Element = e
+
+ return nil
+}
+
+// MarshalJSON implements the Marshaller interface for BlockElements so that any JSON
+// marshalling is delegated and proper type determination can be made before marshal
+func (b *BlockElements) MarshalJSON() ([]byte, error) {
+ bytes, err := json.Marshal(b.ElementSet)
+ if err != nil {
+ return nil, err
+ }
+
+ return bytes, nil
+}
+
+// UnmarshalJSON implements the Unmarshaller interface for BlockElements, so that any JSON
+// unmarshalling is delegated and proper type determination can be made before unmarshal
+func (b *BlockElements) UnmarshalJSON(data []byte) error {
+ var raw []json.RawMessage
+
+ if string(data) == "{}" {
+ return nil
+ }
+
+ err := json.Unmarshal(data, &raw)
+ if err != nil {
+ return err
+ }
+
+ var blockElements BlockElements
+ for _, r := range raw {
+ s := sumtype{}
+ err := json.Unmarshal(r, &s)
+ if err != nil {
+ return err
+ }
+
+ var blockElementType string
+ if s.TypeVal != "" {
+ blockElementType = s.TypeVal
+ }
+
+ var blockElement BlockElement
+ switch blockElementType {
+ case "image":
+ blockElement = &ImageBlockElement{}
+ case "button":
+ blockElement = &ButtonBlockElement{}
+ case "overflow":
+ blockElement = &OverflowBlockElement{}
+ case "datepicker":
+ blockElement = &DatePickerBlockElement{}
+ case "timepicker":
+ blockElement = &TimePickerBlockElement{}
+ case "datetimepicker":
+ blockElement = &DateTimePickerBlockElement{}
+ case "plain_text_input":
+ blockElement = &PlainTextInputBlockElement{}
+ case "email_text_input":
+ blockElement = &EmailTextInputBlockElement{}
+ case "url_text_input":
+ blockElement = &URLTextInputBlockElement{}
+ case "checkboxes":
+ blockElement = &CheckboxGroupsBlockElement{}
+ case "radio_buttons":
+ blockElement = &RadioButtonsBlockElement{}
+ case "static_select", "external_select", "users_select", "conversations_select", "channels_select":
+ blockElement = &SelectBlockElement{}
+ case "number_input":
+ blockElement = &NumberInputBlockElement{}
+ default:
+ return fmt.Errorf("unsupported block element type %v", blockElementType)
+ }
+
+ err = json.Unmarshal(r, blockElement)
+ if err != nil {
+ return err
+ }
+
+ blockElements.ElementSet = append(blockElements.ElementSet, blockElement)
+ }
+
+ *b = blockElements
+ return nil
+}
+
+// MarshalJSON implements the Marshaller interface for Accessory so that any JSON
+// marshalling is delegated and proper type determination can be made before marshal
+func (a *Accessory) MarshalJSON() ([]byte, error) {
+ bytes, err := json.Marshal(toBlockElement(a))
+ if err != nil {
+ return nil, err
+ }
+
+ return bytes, nil
+}
+
+// UnmarshalJSON implements the Unmarshaller interface for Accessory, so that any JSON
+// unmarshalling is delegated and proper type determination can be made before unmarshal
+// Note: datetimepicker is not supported in Accessory
+func (a *Accessory) UnmarshalJSON(data []byte) error {
+ var r json.RawMessage
+
+ if string(data) == "{\"accessory\":null}" {
+ return nil
+ }
+
+ err := json.Unmarshal(data, &r)
+ if err != nil {
+ return err
+ }
+
+ s := sumtype{}
+ err = json.Unmarshal(r, &s)
+ if err != nil {
+ return err
+ }
+
+ var blockElementType string
+ if s.TypeVal != "" {
+ blockElementType = s.TypeVal
+ }
+
+ switch blockElementType {
+ case "image":
+ element, err := unmarshalBlockElement(r, &ImageBlockElement{})
+ if err != nil {
+ return err
+ }
+ a.ImageElement = element.(*ImageBlockElement)
+ case "button":
+ element, err := unmarshalBlockElement(r, &ButtonBlockElement{})
+ if err != nil {
+ return err
+ }
+ a.ButtonElement = element.(*ButtonBlockElement)
+ case "overflow":
+ element, err := unmarshalBlockElement(r, &OverflowBlockElement{})
+ if err != nil {
+ return err
+ }
+ a.OverflowElement = element.(*OverflowBlockElement)
+ case "datepicker":
+ element, err := unmarshalBlockElement(r, &DatePickerBlockElement{})
+ if err != nil {
+ return err
+ }
+ a.DatePickerElement = element.(*DatePickerBlockElement)
+ case "timepicker":
+ element, err := unmarshalBlockElement(r, &TimePickerBlockElement{})
+ if err != nil {
+ return err
+ }
+ a.TimePickerElement = element.(*TimePickerBlockElement)
+ case "plain_text_input":
+ element, err := unmarshalBlockElement(r, &PlainTextInputBlockElement{})
+ if err != nil {
+ return err
+ }
+ a.PlainTextInputElement = element.(*PlainTextInputBlockElement)
+ case "radio_buttons":
+ element, err := unmarshalBlockElement(r, &RadioButtonsBlockElement{})
+ if err != nil {
+ return err
+ }
+ a.RadioButtonsElement = element.(*RadioButtonsBlockElement)
+ case "static_select", "external_select", "users_select", "conversations_select", "channels_select":
+ element, err := unmarshalBlockElement(r, &SelectBlockElement{})
+ if err != nil {
+ return err
+ }
+ a.SelectElement = element.(*SelectBlockElement)
+ case "multi_static_select", "multi_external_select", "multi_users_select", "multi_conversations_select", "multi_channels_select":
+ element, err := unmarshalBlockElement(r, &MultiSelectBlockElement{})
+ if err != nil {
+ return err
+ }
+ a.MultiSelectElement = element.(*MultiSelectBlockElement)
+ case "checkboxes":
+ element, err := unmarshalBlockElement(r, &CheckboxGroupsBlockElement{})
+ if err != nil {
+ return err
+ }
+ a.CheckboxGroupsBlockElement = element.(*CheckboxGroupsBlockElement)
+ default:
+ element, err := unmarshalBlockElement(r, &UnknownBlockElement{})
+ if err != nil {
+ return err
+ }
+ a.UnknownElement = element.(*UnknownBlockElement)
+ }
+
+ return nil
+}
+
+func unmarshalBlockElement(r json.RawMessage, element BlockElement) (BlockElement, error) {
+ err := json.Unmarshal(r, element)
+ if err != nil {
+ return nil, err
+ }
+ return element, nil
+}
+
+func toBlockElement(element *Accessory) BlockElement {
+ if element.ImageElement != nil {
+ return element.ImageElement
+ }
+ if element.ButtonElement != nil {
+ return element.ButtonElement
+ }
+ if element.OverflowElement != nil {
+ return element.OverflowElement
+ }
+ if element.DatePickerElement != nil {
+ return element.DatePickerElement
+ }
+ if element.TimePickerElement != nil {
+ return element.TimePickerElement
+ }
+ if element.PlainTextInputElement != nil {
+ return element.PlainTextInputElement
+ }
+ if element.RadioButtonsElement != nil {
+ return element.RadioButtonsElement
+ }
+ if element.CheckboxGroupsBlockElement != nil {
+ return element.CheckboxGroupsBlockElement
+ }
+ if element.SelectElement != nil {
+ return element.SelectElement
+ }
+ if element.MultiSelectElement != nil {
+ return element.MultiSelectElement
+ }
+
+ return nil
+}
+
+// MarshalJSON implements the Marshaller interface for ContextElements so that any JSON
+// marshalling is delegated and proper type determination can be made before marshal
+func (e *ContextElements) MarshalJSON() ([]byte, error) {
+ bytes, err := json.Marshal(e.Elements)
+ if err != nil {
+ return nil, err
+ }
+
+ return bytes, nil
+}
+
+// UnmarshalJSON implements the Unmarshaller interface for ContextElements, so that any JSON
+// unmarshalling is delegated and proper type determination can be made before unmarshal
+func (e *ContextElements) UnmarshalJSON(data []byte) error {
+ var raw []json.RawMessage
+
+ if string(data) == "{\"elements\":null}" {
+ return nil
+ }
+
+ err := json.Unmarshal(data, &raw)
+ if err != nil {
+ return err
+ }
+
+ for _, r := range raw {
+ s := sumtype{}
+ err := json.Unmarshal(r, &s)
+ if err != nil {
+ return err
+ }
+
+ var contextElementType string
+ if s.TypeVal != "" {
+ contextElementType = s.TypeVal
+ }
+
+ switch contextElementType {
+ case PlainTextType, MarkdownType:
+ elem, err := unmarshalBlockObject(r, &TextBlockObject{})
+ if err != nil {
+ return err
+ }
+
+ e.Elements = append(e.Elements, elem.(*TextBlockObject))
+ case "image":
+ elem, err := unmarshalBlockElement(r, &ImageBlockElement{})
+ if err != nil {
+ return err
+ }
+
+ e.Elements = append(e.Elements, elem.(*ImageBlockElement))
+ default:
+ return errors.New("unsupported context element type")
+ }
+ }
+
+ return nil
+}
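+
+// decodeBlocksSketch is an illustrative sketch added for review; it is not
+// part of the upstream slack-go sources. It shows how the UnmarshalJSON
+// implementations above turn a raw Block Kit "blocks" array into concrete
+// block structs by switching on each element's "type" field.
+func decodeBlocksSketch() (Blocks, error) {
+ raw := []byte(`[
+ {"type": "header", "text": {"type": "plain_text", "text": "Release 1.2.3"}},
+ {"type": "section", "text": {"type": "mrkdwn", "text": "Deployed to *production*."}}
+ ]`)
+
+ var blocks Blocks
+ // Blocks.UnmarshalJSON allocates a HeaderBlock and a SectionBlock here;
+ // unrecognized types fall back to UnknownBlock.
+ if err := json.Unmarshal(raw, &blocks); err != nil {
+ return Blocks{}, err
+ }
+ return blocks, nil
+}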
diff --git a/vendor/github.com/slack-go/slack/block_divider.go b/vendor/github.com/slack-go/slack/block_divider.go
new file mode 100644
index 0000000..2d442ba
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_divider.go
@@ -0,0 +1,22 @@
+package slack
+
+// DividerBlock for displaying a divider line between blocks (similar to <hr> tag in html)
+//
+// More Information: https://api.slack.com/reference/messaging/blocks#divider
+type DividerBlock struct {
+ Type MessageBlockType `json:"type"`
+ BlockID string `json:"block_id,omitempty"`
+}
+
+// BlockType returns the type of the block
+func (s DividerBlock) BlockType() MessageBlockType {
+ return s.Type
+}
+
+// NewDividerBlock returns a new instance of a divider block
+func NewDividerBlock() *DividerBlock {
+ return &DividerBlock{
+ Type: MBTDivider,
+ }
+
+}
diff --git a/vendor/github.com/slack-go/slack/block_element.go b/vendor/github.com/slack-go/slack/block_element.go
new file mode 100644
index 0000000..a70d8f2
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_element.go
@@ -0,0 +1,593 @@
+package slack
+
+// https://api.slack.com/reference/messaging/block-elements
+
+const (
+ METCheckboxGroups MessageElementType = "checkboxes"
+ METImage MessageElementType = "image"
+ METButton MessageElementType = "button"
+ METOverflow MessageElementType = "overflow"
+ METDatepicker MessageElementType = "datepicker"
+ METTimepicker MessageElementType = "timepicker"
+ METDatetimepicker MessageElementType = "datetimepicker"
+ METPlainTextInput MessageElementType = "plain_text_input"
+ METRadioButtons MessageElementType = "radio_buttons"
+ METEmailTextInput MessageElementType = "email_text_input"
+ METURLTextInput MessageElementType = "url_text_input"
+ METNumber MessageElementType = "number_input"
+
+ MixedElementImage MixedElementType = "mixed_image"
+ MixedElementText MixedElementType = "mixed_text"
+
+ OptTypeStatic string = "static_select"
+ OptTypeExternal string = "external_select"
+ OptTypeUser string = "users_select"
+ OptTypeConversations string = "conversations_select"
+ OptTypeChannels string = "channels_select"
+
+ MultiOptTypeStatic string = "multi_static_select"
+ MultiOptTypeExternal string = "multi_external_select"
+ MultiOptTypeUser string = "multi_users_select"
+ MultiOptTypeConversations string = "multi_conversations_select"
+ MultiOptTypeChannels string = "multi_channels_select"
+)
+
+type MessageElementType string
+type MixedElementType string
+
+// BlockElement defines an interface that all block element types should implement.
+type BlockElement interface {
+ ElementType() MessageElementType
+}
+
+type MixedElement interface {
+ MixedElementType() MixedElementType
+}
+
+type Accessory struct {
+ ImageElement *ImageBlockElement
+ ButtonElement *ButtonBlockElement
+ OverflowElement *OverflowBlockElement
+ DatePickerElement *DatePickerBlockElement
+ TimePickerElement *TimePickerBlockElement
+ PlainTextInputElement *PlainTextInputBlockElement
+ RadioButtonsElement *RadioButtonsBlockElement
+ SelectElement *SelectBlockElement
+ MultiSelectElement *MultiSelectBlockElement
+ CheckboxGroupsBlockElement *CheckboxGroupsBlockElement
+ UnknownElement *UnknownBlockElement
+}
+
+// NewAccessory returns a new Accessory for a given block element
+func NewAccessory(element BlockElement) *Accessory {
+ switch element.(type) {
+ case *ImageBlockElement:
+ return &Accessory{ImageElement: element.(*ImageBlockElement)}
+ case *ButtonBlockElement:
+ return &Accessory{ButtonElement: element.(*ButtonBlockElement)}
+ case *OverflowBlockElement:
+ return &Accessory{OverflowElement: element.(*OverflowBlockElement)}
+ case *DatePickerBlockElement:
+ return &Accessory{DatePickerElement: element.(*DatePickerBlockElement)}
+ case *TimePickerBlockElement:
+ return &Accessory{TimePickerElement: element.(*TimePickerBlockElement)}
+ case *PlainTextInputBlockElement:
+ return &Accessory{PlainTextInputElement: element.(*PlainTextInputBlockElement)}
+ case *RadioButtonsBlockElement:
+ return &Accessory{RadioButtonsElement: element.(*RadioButtonsBlockElement)}
+ case *SelectBlockElement:
+ return &Accessory{SelectElement: element.(*SelectBlockElement)}
+ case *MultiSelectBlockElement:
+ return &Accessory{MultiSelectElement: element.(*MultiSelectBlockElement)}
+ case *CheckboxGroupsBlockElement:
+ return &Accessory{CheckboxGroupsBlockElement: element.(*CheckboxGroupsBlockElement)}
+ default:
+ return &Accessory{UnknownElement: element.(*UnknownBlockElement)}
+ }
+}
+
+// BlockElements is a convenience struct defined to allow dynamic unmarshalling of
+// the "elements" value in Slack's JSON response, which varies depending on BlockElement type
+type BlockElements struct {
+ ElementSet []BlockElement `json:"elements,omitempty"`
+}
+
+// UnknownBlockElement represents any block element that this library does not directly support.
+// See the "Rich Elements" section at the following URL:
+// https://api.slack.com/changelog/2019-09-what-they-see-is-what-you-get-and-more-and-less
+// New block element types may be introduced by Slack at any time; this is a catch-all for any such block elements.
+type UnknownBlockElement struct {
+ Type MessageElementType `json:"type"`
+ Elements BlockElements
+}
+
+// ElementType returns the type of the Element
+func (s UnknownBlockElement) ElementType() MessageElementType {
+ return s.Type
+}
+
+// ImageBlockElement defines an element to insert an image - this element can be used
+// in section and context blocks only. If you want a block with only an image
+// in it, you're looking for the image block.
+//
+// More Information: https://api.slack.com/reference/messaging/block-elements#image
+type ImageBlockElement struct {
+ Type MessageElementType `json:"type"`
+ ImageURL string `json:"image_url"`
+ AltText string `json:"alt_text"`
+}
+
+// ElementType returns the type of the Element
+func (s ImageBlockElement) ElementType() MessageElementType {
+ return s.Type
+}
+
+func (s ImageBlockElement) MixedElementType() MixedElementType {
+ return MixedElementImage
+}
+
+// NewImageBlockElement returns a new instance of an image block element
+func NewImageBlockElement(imageURL, altText string) *ImageBlockElement {
+ return &ImageBlockElement{
+ Type: METImage,
+ ImageURL: imageURL,
+ AltText: altText,
+ }
+}
+
+// Style is a style of Button element
+// https://api.slack.com/reference/block-kit/block-elements#button__fields
+type Style string
+
+const (
+ StyleDefault Style = ""
+ StylePrimary Style = "primary"
+ StyleDanger Style = "danger"
+)
+
+// ButtonBlockElement defines an interactive element that inserts a button. The
+// button can be a trigger for anything from opening a simple link to starting
+// a complex workflow.
+//
+// More Information: https://api.slack.com/reference/messaging/block-elements#button
+type ButtonBlockElement struct {
+ Type MessageElementType `json:"type,omitempty"`
+ Text *TextBlockObject `json:"text"`
+ ActionID string `json:"action_id,omitempty"`
+ URL string `json:"url,omitempty"`
+ Value string `json:"value,omitempty"`
+ Confirm *ConfirmationBlockObject `json:"confirm,omitempty"`
+ Style Style `json:"style,omitempty"`
+}
+
+// ElementType returns the type of the element
+func (s ButtonBlockElement) ElementType() MessageElementType {
+ return s.Type
+}
+
+// WithStyle adds styling to the button object and returns the modified ButtonBlockElement
+func (s *ButtonBlockElement) WithStyle(style Style) *ButtonBlockElement {
+ s.Style = style
+ return s
+}
+
+// WithConfirm adds a confirmation dialogue to the button object and returns the modified ButtonBlockElement
+func (s *ButtonBlockElement) WithConfirm(confirm *ConfirmationBlockObject) *ButtonBlockElement {
+ s.Confirm = confirm
+ return s
+}
+
+// NewButtonBlockElement returns an instance of a new button element to be used within a block
+func NewButtonBlockElement(actionID, value string, text *TextBlockObject) *ButtonBlockElement {
+ return &ButtonBlockElement{
+ Type: METButton,
+ ActionID: actionID,
+ Text: text,
+ Value: value,
+ }
+}
+
+// OptionsResponse defines the response used for select block typeahead.
+//
+// More Information: https://api.slack.com/reference/block-kit/block-elements#external_multi_select
+type OptionsResponse struct {
+ Options []*OptionBlockObject `json:"options,omitempty"`
+}
+
+// OptionGroupsResponse defines the response used for select block typeahead.
+//
+// More Information: https://api.slack.com/reference/block-kit/block-elements#external_multi_select
+type OptionGroupsResponse struct {
+ OptionGroups []*OptionGroupBlockObject `json:"option_groups,omitempty"`
+}
+
+// SelectBlockElement defines the simplest form of select menu, with a static list
+// of options passed in when defining the element.
+//
+// More Information: https://api.slack.com/reference/messaging/block-elements#select
+type SelectBlockElement struct {
+ Type string `json:"type,omitempty"`
+ Placeholder *TextBlockObject `json:"placeholder,omitempty"`
+ ActionID string `json:"action_id,omitempty"`
+ Options []*OptionBlockObject `json:"options,omitempty"`
+ OptionGroups []*OptionGroupBlockObject `json:"option_groups,omitempty"`
+ InitialOption *OptionBlockObject `json:"initial_option,omitempty"`
+ InitialUser string `json:"initial_user,omitempty"`
+ InitialConversation string `json:"initial_conversation,omitempty"`
+ InitialChannel string `json:"initial_channel,omitempty"`
+ DefaultToCurrentConversation bool `json:"default_to_current_conversation,omitempty"`
+ ResponseURLEnabled bool `json:"response_url_enabled,omitempty"`
+ Filter *SelectBlockElementFilter `json:"filter,omitempty"`
+ MinQueryLength *int `json:"min_query_length,omitempty"`
+ Confirm *ConfirmationBlockObject `json:"confirm,omitempty"`
+}
+
+// SelectBlockElementFilter allows to filter select element conversation options by type.
+//
+// More Information: https://api.slack.com/reference/block-kit/composition-objects#filter_conversations
+type SelectBlockElementFilter struct {
+ Include []string `json:"include,omitempty"`
+ ExcludeExternalSharedChannels bool `json:"exclude_external_shared_channels,omitempty"`
+ ExcludeBotUsers bool `json:"exclude_bot_users,omitempty"`
+}
+
+// ElementType returns the type of the Element
+func (s SelectBlockElement) ElementType() MessageElementType {
+ return MessageElementType(s.Type)
+}
+
+// NewOptionsSelectBlockElement returns a new instance of SelectBlockElement for use with
+// the Options object only.
+func NewOptionsSelectBlockElement(optType string, placeholder *TextBlockObject, actionID string, options ...*OptionBlockObject) *SelectBlockElement {
+ return &SelectBlockElement{
+ Type: optType,
+ Placeholder: placeholder,
+ ActionID: actionID,
+ Options: options,
+ }
+}
+
+// NewOptionsGroupSelectBlockElement returns a new instance of SelectBlockElement for use with
+// the OptionGroups object only.
+func NewOptionsGroupSelectBlockElement(
+ optType string,
+ placeholder *TextBlockObject,
+ actionID string,
+ optGroups ...*OptionGroupBlockObject,
+) *SelectBlockElement {
+ return &SelectBlockElement{
+ Type: optType,
+ Placeholder: placeholder,
+ ActionID: actionID,
+ OptionGroups: optGroups,
+ }
+}
+
+// MultiSelectBlockElement defines a multiselect menu, with a static list
+// of options passed in when defining the element.
+//
+// More Information: https://api.slack.com/reference/messaging/block-elements#multi_select
+type MultiSelectBlockElement struct {
+ Type string `json:"type,omitempty"`
+ Placeholder *TextBlockObject `json:"placeholder,omitempty"`
+ ActionID string `json:"action_id,omitempty"`
+ Options []*OptionBlockObject `json:"options,omitempty"`
+ OptionGroups []*OptionGroupBlockObject `json:"option_groups,omitempty"`
+ InitialOptions []*OptionBlockObject `json:"initial_options,omitempty"`
+ InitialUsers []string `json:"initial_users,omitempty"`
+ InitialConversations []string `json:"initial_conversations,omitempty"`
+ InitialChannels []string `json:"initial_channels,omitempty"`
+ MinQueryLength *int `json:"min_query_length,omitempty"`
+ MaxSelectedItems *int `json:"max_selected_items,omitempty"`
+ Confirm *ConfirmationBlockObject `json:"confirm,omitempty"`
+}
+
+// ElementType returns the type of the Element
+func (s MultiSelectBlockElement) ElementType() MessageElementType {
+ return MessageElementType(s.Type)
+}
+
+// NewOptionsMultiSelectBlockElement returns a new instance of MultiSelectBlockElement for use with
+// the Options object only.
+func NewOptionsMultiSelectBlockElement(optType string, placeholder *TextBlockObject, actionID string, options ...*OptionBlockObject) *MultiSelectBlockElement {
+ return &MultiSelectBlockElement{
+ Type: optType,
+ Placeholder: placeholder,
+ ActionID: actionID,
+ Options: options,
+ }
+}
+
+// NewOptionsGroupMultiSelectBlockElement returns a new instance of MultiSelectBlockElement for use with
+// the OptionGroups object only.
+func NewOptionsGroupMultiSelectBlockElement(
+ optType string,
+ placeholder *TextBlockObject,
+ actionID string,
+ optGroups ...*OptionGroupBlockObject,
+) *MultiSelectBlockElement {
+ return &MultiSelectBlockElement{
+ Type: optType,
+ Placeholder: placeholder,
+ ActionID: actionID,
+ OptionGroups: optGroups,
+ }
+}
+
+// OverflowBlockElement defines the fields needed to use an overflow element.
+// An Overflow Element is like a cross between a button and a select menu -
+// when a user clicks on this overflow button, they will be presented with a
+// list of options to choose from.
+//
+// More Information: https://api.slack.com/reference/messaging/block-elements#overflow
+type OverflowBlockElement struct {
+ Type MessageElementType `json:"type"`
+ ActionID string `json:"action_id,omitempty"`
+ Options []*OptionBlockObject `json:"options"`
+ Confirm *ConfirmationBlockObject `json:"confirm,omitempty"`
+}
+
+// ElementType returns the type of the Element
+func (s OverflowBlockElement) ElementType() MessageElementType {
+ return s.Type
+}
+
+// NewOverflowBlockElement returns an instance of a new Overflow Block Element
+func NewOverflowBlockElement(actionID string, options ...*OptionBlockObject) *OverflowBlockElement {
+ return &OverflowBlockElement{
+ Type: METOverflow,
+ ActionID: actionID,
+ Options: options,
+ }
+}
+
+// DatePickerBlockElement defines an element which lets users easily select a
+// date from a calendar style UI. Date picker elements can be used inside of
+// section and actions blocks.
+//
+// More Information: https://api.slack.com/reference/messaging/block-elements#datepicker
+type DatePickerBlockElement struct {
+ Type MessageElementType `json:"type"`
+ ActionID string `json:"action_id,omitempty"`
+ Placeholder *TextBlockObject `json:"placeholder,omitempty"`
+ InitialDate string `json:"initial_date,omitempty"`
+ Confirm *ConfirmationBlockObject `json:"confirm,omitempty"`
+}
+
+// ElementType returns the type of the Element
+func (s DatePickerBlockElement) ElementType() MessageElementType {
+ return s.Type
+}
+
+// NewDatePickerBlockElement returns an instance of a date picker element
+func NewDatePickerBlockElement(actionID string) *DatePickerBlockElement {
+ return &DatePickerBlockElement{
+ Type: METDatepicker,
+ ActionID: actionID,
+ }
+}
+
+// TimePickerBlockElement defines an element which lets users easily select a
+// time from a nice UI. Time picker elements can be used inside of
+// section and actions blocks.
+//
+// More Information: https://api.slack.com/reference/messaging/block-elements#timepicker
+type TimePickerBlockElement struct {
+ Type MessageElementType `json:"type"`
+ ActionID string `json:"action_id,omitempty"`
+ Placeholder *TextBlockObject `json:"placeholder,omitempty"`
+ InitialTime string `json:"initial_time,omitempty"`
+ Confirm *ConfirmationBlockObject `json:"confirm,omitempty"`
+}
+
+// ElementType returns the type of the Element
+func (s TimePickerBlockElement) ElementType() MessageElementType {
+ return s.Type
+}
+
+// NewTimePickerBlockElement returns an instance of a time picker element
+func NewTimePickerBlockElement(actionID string) *TimePickerBlockElement {
+ return &TimePickerBlockElement{
+ Type: METTimepicker,
+ ActionID: actionID,
+ }
+}
+
+// DateTimePickerBlockElement defines an element that allows the selection of both
+// a date and a time of day formatted as a UNIX timestamp.
+// More Information: https://api.slack.com/reference/messaging/block-elements#datetimepicker
+type DateTimePickerBlockElement struct {
+ Type MessageElementType `json:"type"`
+ ActionID string `json:"action_id,omitempty"`
+ InitialDateTime int64 `json:"initial_date_time,omitempty"`
+ Confirm *ConfirmationBlockObject `json:"confirm,omitempty"`
+}
+
+// ElementType returns the type of the Element
+func (s DateTimePickerBlockElement) ElementType() MessageElementType {
+ return s.Type
+}
+
+// NewDateTimePickerBlockElement returns an instance of a datetime picker element
+func NewDateTimePickerBlockElement(actionID string) *DateTimePickerBlockElement {
+ return &DateTimePickerBlockElement{
+ Type: METDatetimepicker,
+ ActionID: actionID,
+ }
+}
+
+// EmailTextInputBlockElement creates a field where a user can enter email
+// data.
+// email-text-input elements are currently only available in modals.
+//
+// More Information: https://api.slack.com/reference/block-kit/block-elements#email
+type EmailTextInputBlockElement struct {
+ Type MessageElementType `json:"type"`
+ ActionID string `json:"action_id,omitempty"`
+ Placeholder *TextBlockObject `json:"placeholder,omitempty"`
+ InitialValue string `json:"initial_value,omitempty"`
+ DispatchActionConfig *DispatchActionConfig `json:"dispatch_action_config,omitempty"`
+ FocusOnLoad bool `json:"focus_on_load,omitempty"`
+}
+
+// ElementType returns the type of the Element
+func (s EmailTextInputBlockElement) ElementType() MessageElementType {
+ return s.Type
+}
+
+// NewEmailTextInputBlockElement returns an instance of an email text input
+// element
+func NewEmailTextInputBlockElement(placeholder *TextBlockObject, actionID string) *EmailTextInputBlockElement {
+ return &EmailTextInputBlockElement{
+ Type: METEmailTextInput,
+ ActionID: actionID,
+ Placeholder: placeholder,
+ }
+}
+
+// URLTextInputBlockElement creates a field where a user can enter url data.
+//
+// url-text-input elements are currently only available in modals.
+//
+// More Information: https://api.slack.com/reference/block-kit/block-elements#url
+type URLTextInputBlockElement struct {
+ Type MessageElementType `json:"type"`
+ ActionID string `json:"action_id,omitempty"`
+ Placeholder *TextBlockObject `json:"placeholder,omitempty"`
+ InitialValue string `json:"initial_value,omitempty"`
+ DispatchActionConfig *DispatchActionConfig `json:"dispatch_action_config,omitempty"`
+ FocusOnLoad bool `json:"focus_on_load,omitempty"`
+}
+
+// ElementType returns the type of the Element
+func (s URLTextInputBlockElement) ElementType() MessageElementType {
+ return s.Type
+}
+
+// NewURLTextInputBlockElement returns an instance of a URL text input
+// element
+func NewURLTextInputBlockElement(placeholder *TextBlockObject, actionID string) *URLTextInputBlockElement {
+ return &URLTextInputBlockElement{
+ Type: METURLTextInput,
+ ActionID: actionID,
+ Placeholder: placeholder,
+ }
+}
+
+// PlainTextInputBlockElement creates a field where a user can enter freeform
+// data.
+// Plain-text input elements are currently only available in modals.
+//
+// More Information: https://api.slack.com/reference/block-kit/block-elements#input
+type PlainTextInputBlockElement struct {
+ Type MessageElementType `json:"type"`
+ ActionID string `json:"action_id,omitempty"`
+ Placeholder *TextBlockObject `json:"placeholder,omitempty"`
+ InitialValue string `json:"initial_value,omitempty"`
+ Multiline bool `json:"multiline,omitempty"`
+ MinLength int `json:"min_length,omitempty"`
+ MaxLength int `json:"max_length,omitempty"`
+ DispatchActionConfig *DispatchActionConfig `json:"dispatch_action_config,omitempty"`
+}
+
+type DispatchActionConfig struct {
+ TriggerActionsOn []string `json:"trigger_actions_on,omitempty"`
+}
+
+// ElementType returns the type of the Element
+func (s PlainTextInputBlockElement) ElementType() MessageElementType {
+ return s.Type
+}
+
+// NewPlainTextInputBlockElement returns an instance of a plain-text input
+// element
+func NewPlainTextInputBlockElement(placeholder *TextBlockObject, actionID string) *PlainTextInputBlockElement {
+ return &PlainTextInputBlockElement{
+ Type: METPlainTextInput,
+ ActionID: actionID,
+ Placeholder: placeholder,
+ }
+}
+
+// CheckboxGroupsBlockElement defines an element which allows users to choose
+// one or more items from a list of possible options.
+//
+// More Information: https://api.slack.com/reference/block-kit/block-elements#checkboxes
+type CheckboxGroupsBlockElement struct {
+ Type MessageElementType `json:"type"`
+ ActionID string `json:"action_id,omitempty"`
+ Options []*OptionBlockObject `json:"options"`
+ InitialOptions []*OptionBlockObject `json:"initial_options,omitempty"`
+ Confirm *ConfirmationBlockObject `json:"confirm,omitempty"`
+}
+
+// ElementType returns the type of the Element
+func (c CheckboxGroupsBlockElement) ElementType() MessageElementType {
+ return c.Type
+}
+
+// NewCheckboxGroupsBlockElement returns an instance of a checkbox-group block element
+func NewCheckboxGroupsBlockElement(actionID string, options ...*OptionBlockObject) *CheckboxGroupsBlockElement {
+ return &CheckboxGroupsBlockElement{
+ Type: METCheckboxGroups,
+ ActionID: actionID,
+ Options: options,
+ }
+}
+
+// RadioButtonsBlockElement defines an element which lets users choose one item
+// from a list of possible options.
+//
+// More Information: https://api.slack.com/reference/block-kit/block-elements#radio
+type RadioButtonsBlockElement struct {
+ Type MessageElementType `json:"type"`
+ ActionID string `json:"action_id,omitempty"`
+ Options []*OptionBlockObject `json:"options"`
+ InitialOption *OptionBlockObject `json:"initial_option,omitempty"`
+ Confirm *ConfirmationBlockObject `json:"confirm,omitempty"`
+}
+
+// ElementType returns the type of the Element
+func (s RadioButtonsBlockElement) ElementType() MessageElementType {
+ return s.Type
+}
+
+// NewRadioButtonsBlockElement returns an instance of a radio buttons element.
+func NewRadioButtonsBlockElement(actionID string, options ...*OptionBlockObject) *RadioButtonsBlockElement {
+ return &RadioButtonsBlockElement{
+ Type: METRadioButtons,
+ ActionID: actionID,
+ Options: options,
+ }
+}
+
+// NumberInputBlockElement creates a field where a user can enter number
+// data.
+// Number input elements are currently only available in modals.
+//
+// More Information: https://api.slack.com/reference/block-kit/block-elements#number
+type NumberInputBlockElement struct {
+ Type MessageElementType `json:"type"`
+ IsDecimalAllowed bool `json:"is_decimal_allowed"`
+ ActionID string `json:"action_id,omitempty"`
+ Placeholder *TextBlockObject `json:"placeholder,omitempty"`
+ InitialValue string `json:"initial_value,omitempty"`
+ MinValue string `json:"min_value,omitempty"`
+ MaxValue string `json:"max_value,omitempty"`
+ DispatchActionConfig *DispatchActionConfig `json:"dispatch_action_config,omitempty"`
+}
+
+// ElementType returns the type of the Element
+func (s NumberInputBlockElement) ElementType() MessageElementType {
+ return s.Type
+}
+
+// NewNumberInputBlockElement returns an instance of a number input element
+func NewNumberInputBlockElement(placeholder *TextBlockObject, actionID string, isDecimalAllowed bool) *NumberInputBlockElement {
+ return &NumberInputBlockElement{
+ Type: METNumber,
+ ActionID: actionID,
+ Placeholder: placeholder,
+ IsDecimalAllowed: isDecimalAllowed,
+ }
+}
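+
+// accessorySelectSketch is an illustrative sketch added for review; it is not
+// part of the upstream slack-go sources. It shows how the element
+// constructors above compose: a static select element is built and wrapped in
+// an Accessory, which stores it in the matching typed field (SelectElement).
+// The action ID and option values are placeholders.
+func accessorySelectSketch() *Accessory {
+ placeholder := NewTextBlockObject(PlainTextType, "Pick an environment", false, false)
+ selectEl := NewOptionsSelectBlockElement(
+ OptTypeStatic,
+ placeholder,
+ "env_select",
+ NewOptionBlockObject("staging", NewTextBlockObject(PlainTextType, "Staging", false, false), nil),
+ NewOptionBlockObject("production", NewTextBlockObject(PlainTextType, "Production", false, false), nil),
+ )
+ return NewAccessory(selectEl)
+}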
diff --git a/vendor/github.com/slack-go/slack/block_file.go b/vendor/github.com/slack-go/slack/block_file.go
new file mode 100644
index 0000000..ac4453f
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_file.go
@@ -0,0 +1,26 @@
+package slack
+
+// FileBlock defines data that is used to display a remote file.
+//
+// More Information: https://api.slack.com/reference/block-kit/blocks#file
+type FileBlock struct {
+ Type MessageBlockType `json:"type"`
+ BlockID string `json:"block_id,omitempty"`
+ ExternalID string `json:"external_id"`
+ Source string `json:"source"`
+}
+
+// BlockType returns the type of the block
+func (s FileBlock) BlockType() MessageBlockType {
+ return s.Type
+}
+
+// NewFileBlock returns a new instance of a file block
+func NewFileBlock(blockID string, externalID string, source string) *FileBlock {
+ return &FileBlock{
+ Type: MBTFile,
+ BlockID: blockID,
+ ExternalID: externalID,
+ Source: source,
+ }
+}
diff --git a/vendor/github.com/slack-go/slack/block_header.go b/vendor/github.com/slack-go/slack/block_header.go
new file mode 100644
index 0000000..6dff4b8
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_header.go
@@ -0,0 +1,38 @@
+package slack
+
+// HeaderBlock defines a new block of type header
+//
+// More Information: https://api.slack.com/reference/messaging/blocks#header
+type HeaderBlock struct {
+ Type MessageBlockType `json:"type"`
+ Text *TextBlockObject `json:"text,omitempty"`
+ BlockID string `json:"block_id,omitempty"`
+}
+
+// BlockType returns the type of the block
+func (s HeaderBlock) BlockType() MessageBlockType {
+ return s.Type
+}
+
+// HeaderBlockOption allows configuration of options for a new header block
+type HeaderBlockOption func(*HeaderBlock)
+
+func HeaderBlockOptionBlockID(blockID string) HeaderBlockOption {
+ return func(block *HeaderBlock) {
+ block.BlockID = blockID
+ }
+}
+
+// NewHeaderBlock returns a new instance of a header block to be rendered
+func NewHeaderBlock(textObj *TextBlockObject, options ...HeaderBlockOption) *HeaderBlock {
+ block := HeaderBlock{
+ Type: MBTHeader,
+ Text: textObj,
+ }
+
+ for _, option := range options {
+ option(&block)
+ }
+
+ return &block
+}
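+
+// headerBlockSketch is an illustrative sketch added for review; it is not part
+// of the upstream slack-go sources. It shows the functional-option pattern
+// used by NewHeaderBlock: HeaderBlockOptionBlockID is applied after the
+// required text is set. The block ID is a placeholder.
+func headerBlockSketch() *HeaderBlock {
+ title := NewTextBlockObject("plain_text", "Deployment status", false, false)
+ return NewHeaderBlock(title, HeaderBlockOptionBlockID("deploy_header"))
+}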
diff --git a/vendor/github.com/slack-go/slack/block_image.go b/vendor/github.com/slack-go/slack/block_image.go
new file mode 100644
index 0000000..90cbd14
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_image.go
@@ -0,0 +1,28 @@
+package slack
+
+// ImageBlock defines data required to display an image as a block element
+//
+// More Information: https://api.slack.com/reference/messaging/blocks#image
+type ImageBlock struct {
+ Type MessageBlockType `json:"type"`
+ ImageURL string `json:"image_url"`
+ AltText string `json:"alt_text"`
+ BlockID string `json:"block_id,omitempty"`
+ Title *TextBlockObject `json:"title,omitempty"`
+}
+
+// BlockType returns the type of the block
+func (s ImageBlock) BlockType() MessageBlockType {
+ return s.Type
+}
+
+// NewImageBlock returns an instance of a new Image Block type
+func NewImageBlock(imageURL, altText, blockID string, title *TextBlockObject) *ImageBlock {
+ return &ImageBlock{
+ Type: MBTImage,
+ ImageURL: imageURL,
+ AltText: altText,
+ BlockID: blockID,
+ Title: title,
+ }
+}
diff --git a/vendor/github.com/slack-go/slack/block_input.go b/vendor/github.com/slack-go/slack/block_input.go
new file mode 100644
index 0000000..78ffcdb
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_input.go
@@ -0,0 +1,30 @@
+package slack
+
+// InputBlock defines data that is used to display user input fields.
+//
+// More Information: https://api.slack.com/reference/block-kit/blocks#input
+type InputBlock struct {
+ Type MessageBlockType `json:"type"`
+ BlockID string `json:"block_id,omitempty"`
+ Label *TextBlockObject `json:"label"`
+ Element BlockElement `json:"element"`
+ Hint *TextBlockObject `json:"hint,omitempty"`
+ Optional bool `json:"optional,omitempty"`
+ DispatchAction bool `json:"dispatch_action,omitempty"`
+}
+
+// BlockType returns the type of the block
+func (s InputBlock) BlockType() MessageBlockType {
+ return s.Type
+}
+
+// NewInputBlock returns a new instance of an input block
+func NewInputBlock(blockID string, label, hint *TextBlockObject, element BlockElement) *InputBlock {
+ return &InputBlock{
+ Type: MBTInput,
+ BlockID: blockID,
+ Label: label,
+ Element: element,
+ Hint: hint,
+ }
+}
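+
+// inputBlockSketch is an illustrative sketch added for review; it is not part
+// of the upstream slack-go sources. It pairs an InputBlock with a plain-text
+// input element, the same shape InputBlock.UnmarshalJSON in block_conv.go
+// reconstructs from the element's "type" field. IDs are placeholders.
+func inputBlockSketch() *InputBlock {
+ label := NewTextBlockObject("plain_text", "Release notes", false, false)
+ hint := NewTextBlockObject("plain_text", "Shown in the announcement message", false, false)
+ element := NewPlainTextInputBlockElement(
+ NewTextBlockObject("plain_text", "What changed?", false, false),
+ "release_notes_input",
+ )
+ return NewInputBlock("release_notes_block", label, hint, element)
+}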
diff --git a/vendor/github.com/slack-go/slack/block_object.go b/vendor/github.com/slack-go/slack/block_object.go
new file mode 100644
index 0000000..b39ff22
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_object.go
@@ -0,0 +1,248 @@
+package slack
+
+import (
+ "encoding/json"
+ "errors"
+)
+
+// Block Objects are also known as Composition Objects
+//
+// For more information: https://api.slack.com/reference/messaging/composition-objects
+
+// BlockObject defines an interface that all block object types should
+// implement.
+// @TODO: Is this interface needed?
+
+// blockObject object types
+const (
+ MarkdownType = "mrkdwn"
+ PlainTextType = "plain_text"
+ // The following objects don't actually have types and their corresponding
+ // const values are just for internal use
+ motConfirmation = "confirm"
+ motOption = "option"
+ motOptionGroup = "option_group"
+)
+
+type MessageObjectType string
+
+type blockObject interface {
+ validateType() MessageObjectType
+}
+
+type BlockObjects struct {
+ TextObjects []*TextBlockObject
+ ConfirmationObjects []*ConfirmationBlockObject
+ OptionObjects []*OptionBlockObject
+ OptionGroupObjects []*OptionGroupBlockObject
+}
+
+// UnmarshalJSON implements the Unmarshaller interface for BlockObjects, so that any JSON
+// unmarshalling is delegated and proper type determination can be made before unmarshal
+func (b *BlockObjects) UnmarshalJSON(data []byte) error {
+ var raw []json.RawMessage
+ err := json.Unmarshal(data, &raw)
+ if err != nil {
+ return err
+ }
+
+ for _, r := range raw {
+ var obj map[string]interface{}
+ err := json.Unmarshal(r, &obj)
+ if err != nil {
+ return err
+ }
+
+ blockObjectType := getBlockObjectType(obj)
+
+ switch blockObjectType {
+ case PlainTextType, MarkdownType:
+ object, err := unmarshalBlockObject(r, &TextBlockObject{})
+ if err != nil {
+ return err
+ }
+ b.TextObjects = append(b.TextObjects, object.(*TextBlockObject))
+ case motConfirmation:
+ object, err := unmarshalBlockObject(r, &ConfirmationBlockObject{})
+ if err != nil {
+ return err
+ }
+ b.ConfirmationObjects = append(b.ConfirmationObjects, object.(*ConfirmationBlockObject))
+ case motOption:
+ object, err := unmarshalBlockObject(r, &OptionBlockObject{})
+ if err != nil {
+ return err
+ }
+ b.OptionObjects = append(b.OptionObjects, object.(*OptionBlockObject))
+ case motOptionGroup:
+ object, err := unmarshalBlockObject(r, &OptionGroupBlockObject{})
+ if err != nil {
+ return err
+ }
+ b.OptionGroupObjects = append(b.OptionGroupObjects, object.(*OptionGroupBlockObject))
+
+ }
+ }
+
+ return nil
+}
+
+// Ideally there would be a better way to identify block objects for type
+// casting at unmarshal time; this should be adapted to a more reliable
+// approach if possible.
+func getBlockObjectType(obj map[string]interface{}) string {
+ if t, ok := obj["type"].(string); ok {
+ return t
+ }
+ if _, ok := obj["confirm"].(string); ok {
+ return "confirm"
+ }
+ if _, ok := obj["options"].(string); ok {
+ return "option_group"
+ }
+ if _, ok := obj["text"].(string); ok {
+ if _, ok := obj["value"].(string); ok {
+ return "option"
+ }
+ }
+ return ""
+}
+
+func unmarshalBlockObject(r json.RawMessage, object blockObject) (blockObject, error) {
+ err := json.Unmarshal(r, object)
+ if err != nil {
+ return nil, err
+ }
+ return object, nil
+}
+
+// TextBlockObject defines a text element object to be used with blocks
+//
+// More Information: https://api.slack.com/reference/messaging/composition-objects#text
+type TextBlockObject struct {
+ Type string `json:"type"`
+ Text string `json:"text"`
+ Emoji bool `json:"emoji,omitempty"`
+ Verbatim bool `json:"verbatim,omitempty"`
+}
+
+// validateType enforces block objects for element and block parameters
+func (s TextBlockObject) validateType() MessageObjectType {
+ return MessageObjectType(s.Type)
+}
+
+// validateType enforces block objects for element and block parameters
+func (s TextBlockObject) MixedElementType() MixedElementType {
+ return MixedElementText
+}
+
+// Validate checks if TextBlockObject has valid values
+func (s TextBlockObject) Validate() error {
+ if s.Type != "plain_text" && s.Type != "mrkdwn" {
+ return errors.New("type must be either of plain_text or mrkdwn")
+ }
+
+ // https://github.com/slack-go/slack/issues/881
+ if s.Type == "mrkdwn" && s.Emoji {
+ return errors.New("emoji cannot be true in mrkdown")
+ }
+
+ return nil
+}
+
+// NewTextBlockObject returns an instance of a new Text Block Object
+func NewTextBlockObject(elementType, text string, emoji, verbatim bool) *TextBlockObject {
+ return &TextBlockObject{
+ Type: elementType,
+ Text: text,
+ Emoji: emoji,
+ Verbatim: verbatim,
+ }
+}
+
+// BlockType returns the type of the block
+func (t TextBlockObject) BlockType() MessageBlockType {
+ if t.Type == "mrkdwn" {
+ return MarkdownType
+ }
+ return PlainTextType
+}
+
+// ConfirmationBlockObject defines a dialog that provides a confirmation step to
+// any interactive element. This dialog will ask the user to confirm their action by
+// offering confirm and deny buttons.
+//
+// More Information: https://api.slack.com/reference/messaging/composition-objects#confirm
+type ConfirmationBlockObject struct {
+ Title *TextBlockObject `json:"title"`
+ Text *TextBlockObject `json:"text"`
+ Confirm *TextBlockObject `json:"confirm"`
+ Deny *TextBlockObject `json:"deny,omitempty"`
+ Style Style `json:"style,omitempty"`
+}
+
+// validateType enforces block objects for element and block parameters
+func (s ConfirmationBlockObject) validateType() MessageObjectType {
+ return motConfirmation
+}
+
+// WithStyle add styling to confirmation object
+func (s *ConfirmationBlockObject) WithStyle(style Style) *ConfirmationBlockObject {
+ s.Style = style
+ return s
+}
+
+// NewConfirmationBlockObject returns an instance of a new Confirmation Block Object
+func NewConfirmationBlockObject(title, text, confirm, deny *TextBlockObject) *ConfirmationBlockObject {
+ return &ConfirmationBlockObject{
+ Title: title,
+ Text: text,
+ Confirm: confirm,
+ Deny: deny,
+ }
+}
+
+// OptionBlockObject represents a single selectable item in a select menu
+//
+// More Information: https://api.slack.com/reference/messaging/composition-objects#option
+type OptionBlockObject struct {
+ Text *TextBlockObject `json:"text"`
+ Value string `json:"value"`
+ Description *TextBlockObject `json:"description,omitempty"`
+ URL string `json:"url,omitempty"`
+}
+
+// NewOptionBlockObject returns an instance of a new Option Block Element
+func NewOptionBlockObject(value string, text, description *TextBlockObject) *OptionBlockObject {
+ return &OptionBlockObject{
+ Text: text,
+ Value: value,
+ Description: description,
+ }
+}
+
+// validateType enforces block objects for element and block parameters
+func (s OptionBlockObject) validateType() MessageObjectType {
+ return motOption
+}
+
+// OptionGroupBlockObject Provides a way to group options in a select menu.
+//
+// More Information: https://api.slack.com/reference/messaging/composition-objects#option-group
+type OptionGroupBlockObject struct {
+ Label *TextBlockObject `json:"label,omitempty"`
+ Options []*OptionBlockObject `json:"options"`
+}
+
+// validateType enforces block objects for element and block parameters
+func (s OptionGroupBlockObject) validateType() MessageObjectType {
+ return motOptionGroup
+}
+
+// NewOptionGroupBlockElement returns an instance of a new option group block element
+func NewOptionGroupBlockElement(label *TextBlockObject, options ...*OptionBlockObject) *OptionGroupBlockObject {
+ return &OptionGroupBlockObject{
+ Label: label,
+ Options: options,
+ }
+}
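+
+// textObjectValidationSketch is an illustrative sketch added for review; it is
+// not part of the upstream slack-go sources. It exercises the Validate rule
+// above: a mrkdwn text object must not set the emoji flag.
+func textObjectValidationSketch() error {
+ ok := NewTextBlockObject(MarkdownType, "*bold* text", false, false)
+ if err := ok.Validate(); err != nil {
+ return err
+ }
+ // emoji=true with mrkdwn is rejected by Validate.
+ bad := NewTextBlockObject(MarkdownType, "*bold* text", true, false)
+ return bad.Validate()
+}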
diff --git a/vendor/github.com/slack-go/slack/block_rich_text.go b/vendor/github.com/slack-go/slack/block_rich_text.go
new file mode 100644
index 0000000..01e5cdb
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_rich_text.go
@@ -0,0 +1,383 @@
+package slack
+
+import (
+ "encoding/json"
+)
+
+// RichTextBlock defines a new block of type rich_text.
+// More Information: https://api.slack.com/changelog/2019-09-what-they-see-is-what-you-get-and-more-and-less
+type RichTextBlock struct {
+ Type MessageBlockType `json:"type"`
+ BlockID string `json:"block_id,omitempty"`
+ Elements []RichTextElement `json:"elements"`
+}
+
+func (b RichTextBlock) BlockType() MessageBlockType {
+ return b.Type
+}
+
+func (e *RichTextBlock) UnmarshalJSON(b []byte) error {
+ var raw struct {
+ Type MessageBlockType `json:"type"`
+ BlockID string `json:"block_id"`
+ RawElements []json.RawMessage `json:"elements"`
+ }
+ if string(b) == "{}" {
+ return nil
+ }
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ elems := make([]RichTextElement, 0, len(raw.RawElements))
+ for _, r := range raw.RawElements {
+ var s struct {
+ Type RichTextElementType `json:"type"`
+ }
+ if err := json.Unmarshal(r, &s); err != nil {
+ return err
+ }
+ var elem RichTextElement
+ switch s.Type {
+ case RTESection:
+ elem = &RichTextSection{}
+ default:
+ elems = append(elems, &RichTextUnknown{
+ Type: s.Type,
+ Raw: string(r),
+ })
+ continue
+ }
+ if err := json.Unmarshal(r, &elem); err != nil {
+ return err
+ }
+ elems = append(elems, elem)
+ }
+ *e = RichTextBlock{
+ Type: raw.Type,
+ BlockID: raw.BlockID,
+ Elements: elems,
+ }
+ return nil
+}
+
+// NewRichTextBlock returns a new instance of RichText Block.
+func NewRichTextBlock(blockID string, elements ...RichTextElement) *RichTextBlock {
+ return &RichTextBlock{
+ Type: MBTRichText,
+ BlockID: blockID,
+ Elements: elements,
+ }
+}
+
+type RichTextElementType string
+
+type RichTextElement interface {
+ RichTextElementType() RichTextElementType
+}
+
+const (
+ RTEList RichTextElementType = "rich_text_list"
+ RTEPreformatted RichTextElementType = "rich_text_preformatted"
+ RTEQuote RichTextElementType = "rich_text_quote"
+ RTESection RichTextElementType = "rich_text_section"
+ RTEUnknown RichTextElementType = "rich_text_unknown"
+)
+
+type RichTextUnknown struct {
+ Type RichTextElementType
+ Raw string
+}
+
+func (u RichTextUnknown) RichTextElementType() RichTextElementType {
+ return u.Type
+}
+
+type RichTextSection struct {
+ Type RichTextElementType `json:"type"`
+ Elements []RichTextSectionElement `json:"elements"`
+}
+
+// ElementType returns the type of the Element
+func (s RichTextSection) RichTextElementType() RichTextElementType {
+ return s.Type
+}
+
+func (e *RichTextSection) UnmarshalJSON(b []byte) error {
+ var raw struct {
+ RawElements []json.RawMessage `json:"elements"`
+ }
+ if string(b) == "{}" {
+ return nil
+ }
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ elems := make([]RichTextSectionElement, 0, len(raw.RawElements))
+ for _, r := range raw.RawElements {
+ var s struct {
+ Type RichTextSectionElementType `json:"type"`
+ }
+ if err := json.Unmarshal(r, &s); err != nil {
+ return err
+ }
+ var elem RichTextSectionElement
+ switch s.Type {
+ case RTSEText:
+ elem = &RichTextSectionTextElement{}
+ case RTSEChannel:
+ elem = &RichTextSectionChannelElement{}
+ case RTSEUser:
+ elem = &RichTextSectionUserElement{}
+ case RTSEEmoji:
+ elem = &RichTextSectionEmojiElement{}
+ case RTSELink:
+ elem = &RichTextSectionLinkElement{}
+ case RTSETeam:
+ elem = &RichTextSectionTeamElement{}
+ case RTSEUserGroup:
+ elem = &RichTextSectionUserGroupElement{}
+ case RTSEDate:
+ elem = &RichTextSectionDateElement{}
+ case RTSEBroadcast:
+ elem = &RichTextSectionBroadcastElement{}
+ case RTSEColor:
+ elem = &RichTextSectionColorElement{}
+ default:
+ elems = append(elems, &RichTextSectionUnknownElement{
+ Type: s.Type,
+ Raw: string(r),
+ })
+ continue
+ }
+ if err := json.Unmarshal(r, elem); err != nil {
+ return err
+ }
+ elems = append(elems, elem)
+ }
+ *e = RichTextSection{
+ Type: RTESection,
+ Elements: elems,
+ }
+ return nil
+}
+
+// NewRichTextSection returns a new rich text section containing the given elements.
+func NewRichTextSection(elements ...RichTextSectionElement) *RichTextSection {
+ return &RichTextSection{
+ Type: RTESection,
+ Elements: elements,
+ }
+}
+
+type RichTextSectionElementType string
+
+const (
+ RTSEBroadcast RichTextSectionElementType = "broadcast"
+ RTSEChannel RichTextSectionElementType = "channel"
+ RTSEColor RichTextSectionElementType = "color"
+ RTSEDate RichTextSectionElementType = "date"
+ RTSEEmoji RichTextSectionElementType = "emoji"
+ RTSELink RichTextSectionElementType = "link"
+ RTSETeam RichTextSectionElementType = "team"
+ RTSEText RichTextSectionElementType = "text"
+ RTSEUser RichTextSectionElementType = "user"
+ RTSEUserGroup RichTextSectionElementType = "usergroup"
+
+ RTSEUnknown RichTextSectionElementType = "unknown"
+)
+
+type RichTextSectionElement interface {
+ RichTextSectionElementType() RichTextSectionElementType
+}
+
+type RichTextSectionTextStyle struct {
+ Bold bool `json:"bold,omitempty"`
+ Italic bool `json:"italic,omitempty"`
+ Strike bool `json:"strike,omitempty"`
+ Code bool `json:"code,omitempty"`
+}
+
+type RichTextSectionTextElement struct {
+ Type RichTextSectionElementType `json:"type"`
+ Text string `json:"text"`
+ Style *RichTextSectionTextStyle `json:"style,omitempty"`
+}
+
+func (r RichTextSectionTextElement) RichTextSectionElementType() RichTextSectionElementType {
+ return r.Type
+}
+
+func NewRichTextSectionTextElement(text string, style *RichTextSectionTextStyle) *RichTextSectionTextElement {
+ return &RichTextSectionTextElement{
+ Type: RTSEText,
+ Text: text,
+ Style: style,
+ }
+}
+
+type RichTextSectionChannelElement struct {
+ Type RichTextSectionElementType `json:"type"`
+ ChannelID string `json:"channel_id"`
+ Style *RichTextSectionTextStyle `json:"style,omitempty"`
+}
+
+func (r RichTextSectionChannelElement) RichTextSectionElementType() RichTextSectionElementType {
+ return r.Type
+}
+
+func NewRichTextSectionChannelElement(channelID string, style *RichTextSectionTextStyle) *RichTextSectionChannelElement {
+ return &RichTextSectionChannelElement{
+ Type: RTSEChannel,
+ ChannelID: channelID,
+ Style: style,
+ }
+}
+
+type RichTextSectionUserElement struct {
+ Type RichTextSectionElementType `json:"type"`
+ UserID string `json:"user_id"`
+ Style *RichTextSectionTextStyle `json:"style,omitempty"`
+}
+
+func (r RichTextSectionUserElement) RichTextSectionElementType() RichTextSectionElementType {
+ return r.Type
+}
+
+func NewRichTextSectionUserElement(userID string, style *RichTextSectionTextStyle) *RichTextSectionUserElement {
+ return &RichTextSectionUserElement{
+ Type: RTSEUser,
+ UserID: userID,
+ Style: style,
+ }
+}
+
+type RichTextSectionEmojiElement struct {
+ Type RichTextSectionElementType `json:"type"`
+ Name string `json:"name"`
+ SkinTone int `json:"skin_tone"`
+ Style *RichTextSectionTextStyle `json:"style,omitempty"`
+}
+
+func (r RichTextSectionEmojiElement) RichTextSectionElementType() RichTextSectionElementType {
+ return r.Type
+}
+
+func NewRichTextSectionEmojiElement(name string, skinTone int, style *RichTextSectionTextStyle) *RichTextSectionEmojiElement {
+ return &RichTextSectionEmojiElement{
+ Type: RTSEEmoji,
+ Name: name,
+ SkinTone: skinTone,
+ Style: style,
+ }
+}
+
+type RichTextSectionLinkElement struct {
+ Type RichTextSectionElementType `json:"type"`
+ URL string `json:"url"`
+ Text string `json:"text"`
+ Style *RichTextSectionTextStyle `json:"style,omitempty"`
+}
+
+func (r RichTextSectionLinkElement) RichTextSectionElementType() RichTextSectionElementType {
+ return r.Type
+}
+
+func NewRichTextSectionLinkElement(url, text string, style *RichTextSectionTextStyle) *RichTextSectionLinkElement {
+ return &RichTextSectionLinkElement{
+ Type: RTSELink,
+ URL: url,
+ Text: text,
+ Style: style,
+ }
+}
+
+type RichTextSectionTeamElement struct {
+ Type RichTextSectionElementType `json:"type"`
+ TeamID string `json:"team_id"`
+ Style *RichTextSectionTextStyle `json:"style,omitempty"`
+}
+
+func (r RichTextSectionTeamElement) RichTextSectionElementType() RichTextSectionElementType {
+ return r.Type
+}
+
+func NewRichTextSectionTeamElement(teamID string, style *RichTextSectionTextStyle) *RichTextSectionTeamElement {
+ return &RichTextSectionTeamElement{
+ Type: RTSETeam,
+ TeamID: teamID,
+ Style: style,
+ }
+}
+
+type RichTextSectionUserGroupElement struct {
+ Type RichTextSectionElementType `json:"type"`
+ UsergroupID string `json:"usergroup_id"`
+}
+
+func (r RichTextSectionUserGroupElement) RichTextSectionElementType() RichTextSectionElementType {
+ return r.Type
+}
+
+func NewRichTextSectionUserGroupElement(usergroupID string) *RichTextSectionUserGroupElement {
+ return &RichTextSectionUserGroupElement{
+ Type: RTSEUserGroup,
+ UsergroupID: usergroupID,
+ }
+}
+
+type RichTextSectionDateElement struct {
+ Type RichTextSectionElementType `json:"type"`
+ Timestamp JSONTime `json:"timestamp"`
+}
+
+func (r RichTextSectionDateElement) RichTextSectionElementType() RichTextSectionElementType {
+ return r.Type
+}
+
+func NewRichTextSectionDateElement(timestamp int64) *RichTextSectionDateElement {
+ return &RichTextSectionDateElement{
+ Type: RTSEDate,
+ Timestamp: JSONTime(timestamp),
+ }
+}
+
+type RichTextSectionBroadcastElement struct {
+ Type RichTextSectionElementType `json:"type"`
+ Range string `json:"range"`
+}
+
+func (r RichTextSectionBroadcastElement) RichTextSectionElementType() RichTextSectionElementType {
+ return r.Type
+}
+
+func NewRichTextSectionBroadcastElement(rangeStr string) *RichTextSectionBroadcastElement {
+ return &RichTextSectionBroadcastElement{
+ Type: RTSEBroadcast,
+ Range: rangeStr,
+ }
+}
+
+type RichTextSectionColorElement struct {
+ Type RichTextSectionElementType `json:"type"`
+ Value string `json:"value"`
+}
+
+func (r RichTextSectionColorElement) RichTextSectionElementType() RichTextSectionElementType {
+ return r.Type
+}
+
+func NewRichTextSectionColorElement(value string) *RichTextSectionColorElement {
+ return &RichTextSectionColorElement{
+ Type: RTSEColor,
+ Value: value,
+ }
+}
+
+type RichTextSectionUnknownElement struct {
+ Type RichTextSectionElementType `json:"type"`
+ Raw string
+}
+
+func (r RichTextSectionUnknownElement) RichTextSectionElementType() RichTextSectionElementType {
+ return r.Type
+}
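+
+// richTextSketch is an illustrative sketch added for review; it is not part of
+// the upstream slack-go sources. It composes a rich_text block from one
+// section holding a plain text run and a bold user mention. The block ID and
+// user ID are placeholders.
+func richTextSketch() *RichTextBlock {
+ section := NewRichTextSection(
+ NewRichTextSectionTextElement("Deployed by ", nil),
+ NewRichTextSectionUserElement("U0123456789", &RichTextSectionTextStyle{Bold: true}),
+ )
+ return NewRichTextBlock("deploy_rich_text", section)
+}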
diff --git a/vendor/github.com/slack-go/slack/block_section.go b/vendor/github.com/slack-go/slack/block_section.go
new file mode 100644
index 0000000..01ffd5a
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_section.go
@@ -0,0 +1,42 @@
+package slack
+
+// SectionBlock defines a new block of type section
+//
+// More Information: https://api.slack.com/reference/messaging/blocks#section
+type SectionBlock struct {
+ Type MessageBlockType `json:"type"`
+ Text *TextBlockObject `json:"text,omitempty"`
+ BlockID string `json:"block_id,omitempty"`
+ Fields []*TextBlockObject `json:"fields,omitempty"`
+ Accessory *Accessory `json:"accessory,omitempty"`
+}
+
+// BlockType returns the type of the block
+func (s SectionBlock) BlockType() MessageBlockType {
+ return s.Type
+}
+
+// SectionBlockOption allows configuration of options for a new section block
+type SectionBlockOption func(*SectionBlock)
+
+func SectionBlockOptionBlockID(blockID string) SectionBlockOption {
+ return func(block *SectionBlock) {
+ block.BlockID = blockID
+ }
+}
+
+// NewSectionBlock returns a new instance of a section block to be rendered
+func NewSectionBlock(textObj *TextBlockObject, fields []*TextBlockObject, accessory *Accessory, options ...SectionBlockOption) *SectionBlock {
+ block := SectionBlock{
+ Type: MBTSection,
+ Text: textObj,
+ Fields: fields,
+ Accessory: accessory,
+ }
+
+ for _, option := range options {
+ option(&block)
+ }
+
+ return &block
+}
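+
+// sectionBlockSketch is an illustrative sketch added for review; it is not
+// part of the upstream slack-go sources. It builds a section block with
+// mrkdwn text, two fields, and a button accessory; IDs and labels are
+// placeholders.
+func sectionBlockSketch() *SectionBlock {
+ text := NewTextBlockObject("mrkdwn", "*Build finished*", false, false)
+ fields := []*TextBlockObject{
+ NewTextBlockObject("mrkdwn", "*Branch:*\nmain", false, false),
+ NewTextBlockObject("mrkdwn", "*Status:*\npassed", false, false),
+ }
+ button := NewButtonBlockElement("view_logs", "logs", NewTextBlockObject("plain_text", "View logs", false, false))
+ return NewSectionBlock(text, fields, NewAccessory(button), SectionBlockOptionBlockID("build_section"))
+}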
diff --git a/vendor/github.com/slack-go/slack/block_unknown.go b/vendor/github.com/slack-go/slack/block_unknown.go
new file mode 100644
index 0000000..97054c7
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/block_unknown.go
@@ -0,0 +1,13 @@
+package slack
+
+// UnknownBlock represents a block type that is not yet known. This block type exists so that new and
+// unknown block types introduced by Slack do not break this library.
+type UnknownBlock struct {
+ Type MessageBlockType `json:"type"`
+ BlockID string `json:"block_id,omitempty"`
+}
+
+// BlockType returns the type of the block
+func (b UnknownBlock) BlockType() MessageBlockType {
+ return b.Type
+}
diff --git a/vendor/github.com/slack-go/slack/bookmarks.go b/vendor/github.com/slack-go/slack/bookmarks.go
new file mode 100644
index 0000000..7875350
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/bookmarks.go
@@ -0,0 +1,159 @@
+package slack
+
+import (
+ "context"
+ "net/url"
+)
+
+type Bookmark struct {
+ ID string `json:"id"`
+ ChannelID string `json:"channel_id"`
+ Title string `json:"title"`
+ Link string `json:"link"`
+ Emoji string `json:"emoji"`
+ IconURL string `json:"icon_url"`
+ Type string `json:"type"`
+ Created JSONTime `json:"date_created"`
+ Updated JSONTime `json:"date_updated"`
+ Rank string `json:"rank"`
+
+ LastUpdatedByUserID string `json:"last_updated_by_user_id"`
+ LastUpdatedByTeamID string `json:"last_updated_by_team_id"`
+
+ ShortcutID string `json:"shortcut_id"`
+ EntityID string `json:"entity_id"`
+ AppID string `json:"app_id"`
+}
+
+type AddBookmarkParameters struct {
+ Title string // A required title for the bookmark
+ Type string // A required type for the bookmark
+ Link string // URL required for type:link
+ Emoji string // An optional emoji
+ EntityID string
+ ParentID string
+}
+
+type EditBookmarkParameters struct {
+ Title *string // Change the title. Set to "" to clear
+ Emoji *string // Change the emoji. Set to "" to clear
+ Link string // Change the link
+}
+
+type addBookmarkResponse struct {
+ Bookmark Bookmark `json:"bookmark"`
+ SlackResponse
+}
+
+type editBookmarkResponse struct {
+ Bookmark Bookmark `json:"bookmark"`
+ SlackResponse
+}
+
+type listBookmarksResponse struct {
+ Bookmarks []Bookmark `json:"bookmarks"`
+ SlackResponse
+}
+
+// AddBookmark adds a bookmark in a channel
+func (api *Client) AddBookmark(channelID string, params AddBookmarkParameters) (Bookmark, error) {
+ return api.AddBookmarkContext(context.Background(), channelID, params)
+}
+
+// AddBookmarkContext adds a bookmark in a channel with a custom context
+func (api *Client) AddBookmarkContext(ctx context.Context, channelID string, params AddBookmarkParameters) (Bookmark, error) {
+ values := url.Values{
+ "channel_id": {channelID},
+ "token": {api.token},
+ "title": {params.Title},
+ "type": {params.Type},
+ }
+ if params.Link != "" {
+ values.Set("link", params.Link)
+ }
+ if params.Emoji != "" {
+ values.Set("emoji", params.Emoji)
+ }
+ if params.EntityID != "" {
+ values.Set("entity_id", params.EntityID)
+ }
+ if params.ParentID != "" {
+ values.Set("parent_id", params.ParentID)
+ }
+
+ response := &addBookmarkResponse{}
+ if err := api.postMethod(ctx, "bookmarks.add", values, response); err != nil {
+ return Bookmark{}, err
+ }
+
+ return response.Bookmark, response.Err()
+}
+
+// RemoveBookmark removes a bookmark from a channel
+func (api *Client) RemoveBookmark(channelID, bookmarkID string) error {
+ return api.RemoveBookmarkContext(context.Background(), channelID, bookmarkID)
+}
+
+// RemoveBookmarkContext removes a bookmark from a channel with a custom context
+func (api *Client) RemoveBookmarkContext(ctx context.Context, channelID, bookmarkID string) error {
+ values := url.Values{
+ "channel_id": {channelID},
+ "token": {api.token},
+ "bookmark_id": {bookmarkID},
+ }
+
+ response := &SlackResponse{}
+ if err := api.postMethod(ctx, "bookmarks.remove", values, response); err != nil {
+ return err
+ }
+
+ return response.Err()
+}
+
+// ListBookmarks returns all bookmarks for a channel.
+func (api *Client) ListBookmarks(channelID string) ([]Bookmark, error) {
+ return api.ListBookmarksContext(context.Background(), channelID)
+}
+
+// ListBookmarksContext returns all bookmarks for a channel with a custom context.
+func (api *Client) ListBookmarksContext(ctx context.Context, channelID string) ([]Bookmark, error) {
+ values := url.Values{
+ "channel_id": {channelID},
+ "token": {api.token},
+ }
+
+ response := &listBookmarksResponse{}
+ err := api.postMethod(ctx, "bookmarks.list", values, response)
+ if err != nil {
+ return nil, err
+ }
+ return response.Bookmarks, response.Err()
+}
+
+func (api *Client) EditBookmark(channelID, bookmarkID string, params EditBookmarkParameters) (Bookmark, error) {
+ return api.EditBookmarkContext(context.Background(), channelID, bookmarkID, params)
+}
+
+func (api *Client) EditBookmarkContext(ctx context.Context, channelID, bookmarkID string, params EditBookmarkParameters) (Bookmark, error) {
+ values := url.Values{
+ "channel_id": {channelID},
+ "token": {api.token},
+ "bookmark_id": {bookmarkID},
+ }
+ if params.Link != "" {
+ values.Set("link", params.Link)
+ }
+ if params.Emoji != nil {
+ values.Set("emoji", *params.Emoji)
+ }
+ if params.Title != nil {
+ values.Set("title", *params.Title)
+ }
+
+ response := &editBookmarkResponse{}
+ if err := api.postMethod(ctx, "bookmarks.edit", values, response); err != nil {
+ return Bookmark{}, err
+ }
+
+ return response.Bookmark, response.Err()
+}
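For orientation, a hedged usage sketch of the bookmarks API added above (bookmarks.add and bookmarks.list). It assumes the library's standard slack.New constructor; the token, channel ID, and URL are placeholders.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/slack-go/slack"
)

func main() {
	api := slack.New(os.Getenv("SLACK_BOT_TOKEN"))

	// Add a link-type bookmark, then list the channel's bookmarks.
	bookmark, err := api.AddBookmark("C0123456789", slack.AddBookmarkParameters{
		Title: "Runbook",
		Type:  "link",
		Link:  "https://example.com/runbook",
	})
	if err != nil {
		log.Fatal(err)
	}

	all, err := api.ListBookmarks("C0123456789")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("added %q; channel now has %d bookmarks\n", bookmark.Title, len(all))
}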
diff --git a/vendor/github.com/slack-go/slack/bots.go b/vendor/github.com/slack-go/slack/bots.go
new file mode 100644
index 0000000..da21ba0
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/bots.go
@@ -0,0 +1,58 @@
+package slack
+
+import (
+ "context"
+ "net/url"
+)
+
+// Bot contains information about a bot
+type Bot struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Deleted bool `json:"deleted"`
+ UserID string `json:"user_id"`
+ AppID string `json:"app_id"`
+ Updated JSONTime `json:"updated"`
+ Icons Icons `json:"icons"`
+}
+
+type botResponseFull struct {
+ Bot `json:"bot,omitempty"` // GetBotInfo
+ SlackResponse
+}
+
+func (api *Client) botRequest(ctx context.Context, path string, values url.Values) (*botResponseFull, error) {
+ response := &botResponseFull{}
+ err := api.postMethod(ctx, path, values, response)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := response.Err(); err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// GetBotInfo will retrieve the complete bot information
+func (api *Client) GetBotInfo(bot string) (*Bot, error) {
+ return api.GetBotInfoContext(context.Background(), bot)
+}
+
+// GetBotInfoContext will retrieve the complete bot information using a custom context
+func (api *Client) GetBotInfoContext(ctx context.Context, bot string) (*Bot, error) {
+ values := url.Values{
+ "token": {api.token},
+ }
+
+ if bot != "" {
+ values.Add("bot", bot)
+ }
+
+ response, err := api.botRequest(ctx, "bots.info", values)
+ if err != nil {
+ return nil, err
+ }
+ return &response.Bot, nil
+}
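A small sketch of the bots.info wrapper above; slack.New is assumed from the library's client code, and the token and bot ID are placeholders.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/slack-go/slack"
)

func main() {
	api := slack.New(os.Getenv("SLACK_BOT_TOKEN"))

	// Look up a bot user by its bot ID ("B..." identifier).
	bot, err := api.GetBotInfo("B0123456789")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bot.Name, bot.AppID, bot.Deleted)
}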
diff --git a/vendor/github.com/slack-go/slack/channels.go b/vendor/github.com/slack-go/slack/channels.go
new file mode 100644
index 0000000..2fca8b9
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/channels.go
@@ -0,0 +1,36 @@
+package slack
+
+import (
+ "context"
+ "net/url"
+)
+
+type channelResponseFull struct {
+ Channel Channel `json:"channel"`
+ Channels []Channel `json:"channels"`
+ Purpose string `json:"purpose"`
+ Topic string `json:"topic"`
+ NotInChannel bool `json:"not_in_channel"`
+ History
+ SlackResponse
+ Metadata ResponseMetadata `json:"response_metadata"`
+}
+
+// Channel contains information about the channel
+type Channel struct {
+ GroupConversation
+ IsChannel bool `json:"is_channel"`
+ IsGeneral bool `json:"is_general"`
+ IsMember bool `json:"is_member"`
+ Locale string `json:"locale"`
+}
+
+func (api *Client) channelRequest(ctx context.Context, path string, values url.Values) (*channelResponseFull, error) {
+ response := &channelResponseFull{}
+ err := postForm(ctx, api.httpclient, api.endpoint+path, values, response, api)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, response.Err()
+}
diff --git a/vendor/github.com/slack-go/slack/chat.go b/vendor/github.com/slack-go/slack/chat.go
new file mode 100644
index 0000000..35ffbbc
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/chat.go
@@ -0,0 +1,876 @@
+package slack
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+
+ "github.com/slack-go/slack/slackutilsx"
+)
+
+const (
+ DEFAULT_MESSAGE_USERNAME = ""
+ DEFAULT_MESSAGE_REPLY_BROADCAST = false
+ DEFAULT_MESSAGE_ASUSER = false
+ DEFAULT_MESSAGE_PARSE = ""
+ DEFAULT_MESSAGE_THREAD_TIMESTAMP = ""
+ DEFAULT_MESSAGE_LINK_NAMES = 0
+ DEFAULT_MESSAGE_UNFURL_LINKS = false
+ DEFAULT_MESSAGE_UNFURL_MEDIA = true
+ DEFAULT_MESSAGE_ICON_URL = ""
+ DEFAULT_MESSAGE_ICON_EMOJI = ""
+ DEFAULT_MESSAGE_MARKDOWN = true
+ DEFAULT_MESSAGE_ESCAPE_TEXT = true
+)
+
+type chatResponseFull struct {
+ Channel string `json:"channel"`
+ Timestamp string `json:"ts"` // Regular message timestamp
+ MessageTimeStamp string `json:"message_ts"` // Ephemeral message timestamp
+ ScheduledMessageID string `json:"scheduled_message_id,omitempty"` // Scheduled message id
+ Text string `json:"text"`
+ SlackResponse
+}
+
+// getMessageTimestamp will inspect the `chatResponseFull` to return a timestamp value.
+// In `chat.postMessage` it is under `ts`;
+// in `chat.postEphemeral` it is under `message_ts`.
+func (c chatResponseFull) getMessageTimestamp() string {
+ if len(c.Timestamp) > 0 {
+ return c.Timestamp
+ }
+ return c.MessageTimeStamp
+}
+
+// PostMessageParameters contains all the parameters necessary (including the optional ones) for a PostMessage() request
+type PostMessageParameters struct {
+ Username string `json:"username"`
+ AsUser bool `json:"as_user"`
+ Parse string `json:"parse"`
+ ThreadTimestamp string `json:"thread_ts"`
+ ReplyBroadcast bool `json:"reply_broadcast"`
+ LinkNames int `json:"link_names"`
+ UnfurlLinks bool `json:"unfurl_links"`
+ UnfurlMedia bool `json:"unfurl_media"`
+ IconURL string `json:"icon_url"`
+ IconEmoji string `json:"icon_emoji"`
+ Markdown bool `json:"mrkdwn,omitempty"`
+ EscapeText bool `json:"escape_text"`
+
+ // chat.postEphemeral support
+ Channel string `json:"channel"`
+ User string `json:"user"`
+
+ // chat metadata support
+ MetaData SlackMetadata `json:"metadata"`
+}
+
+// NewPostMessageParameters provides an instance of PostMessageParameters with all the sane default values set
+func NewPostMessageParameters() PostMessageParameters {
+ return PostMessageParameters{
+ Username: DEFAULT_MESSAGE_USERNAME,
+ User: DEFAULT_MESSAGE_USERNAME,
+ AsUser: DEFAULT_MESSAGE_ASUSER,
+ Parse: DEFAULT_MESSAGE_PARSE,
+ ThreadTimestamp: DEFAULT_MESSAGE_THREAD_TIMESTAMP,
+ LinkNames: DEFAULT_MESSAGE_LINK_NAMES,
+ UnfurlLinks: DEFAULT_MESSAGE_UNFURL_LINKS,
+ UnfurlMedia: DEFAULT_MESSAGE_UNFURL_MEDIA,
+ IconURL: DEFAULT_MESSAGE_ICON_URL,
+ IconEmoji: DEFAULT_MESSAGE_ICON_EMOJI,
+ Markdown: DEFAULT_MESSAGE_MARKDOWN,
+ EscapeText: DEFAULT_MESSAGE_ESCAPE_TEXT,
+ }
+}
+
+// DeleteMessage deletes a message in a channel
+func (api *Client) DeleteMessage(channel, messageTimestamp string) (string, string, error) {
+ return api.DeleteMessageContext(context.Background(), channel, messageTimestamp)
+}
+
+// DeleteMessageContext deletes a message in a channel with a custom context
+func (api *Client) DeleteMessageContext(ctx context.Context, channel, messageTimestamp string) (string, string, error) {
+ respChannel, respTimestamp, _, err := api.SendMessageContext(
+ ctx,
+ channel,
+ MsgOptionDelete(messageTimestamp),
+ )
+ return respChannel, respTimestamp, err
+}
+
+// ScheduleMessage schedules a message to be sent to a channel.
+// Message is escaped by default according to https://api.slack.com/docs/formatting
+// Use http://davestevens.github.io/slack-message-builder/ to help craft your message.
+func (api *Client) ScheduleMessage(channelID, postAt string, options ...MsgOption) (string, string, error) {
+ return api.ScheduleMessageContext(context.Background(), channelID, postAt, options...)
+}
+
+// ScheduleMessageContext schedules a message to be sent to a channel, with a custom context.
+//
+// For more details, see ScheduleMessage documentation.
+func (api *Client) ScheduleMessageContext(ctx context.Context, channelID, postAt string, options ...MsgOption) (string, string, error) {
+ respChannel, respTimestamp, _, err := api.SendMessageContext(
+ ctx,
+ channelID,
+ MsgOptionSchedule(postAt),
+ MsgOptionCompose(options...),
+ )
+ return respChannel, respTimestamp, err
+}
+
+// PostMessage sends a message to a channel.
+// Message is escaped by default according to https://api.slack.com/docs/formatting
+// Use http://davestevens.github.io/slack-message-builder/ to help craft your message.
+func (api *Client) PostMessage(channelID string, options ...MsgOption) (string, string, error) {
+ return api.PostMessageContext(context.Background(), channelID, options...)
+}
+
+// PostMessageContext sends a message to a channel with a custom context
+// For more details, see PostMessage documentation.
+func (api *Client) PostMessageContext(ctx context.Context, channelID string, options ...MsgOption) (string, string, error) {
+ respChannel, respTimestamp, _, err := api.SendMessageContext(
+ ctx,
+ channelID,
+ MsgOptionPost(),
+ MsgOptionCompose(options...),
+ )
+ return respChannel, respTimestamp, err
+}
+
+// PostEphemeral sends an ephemeral message to a user in a channel.
+// Message is escaped by default according to https://api.slack.com/docs/formatting
+// Use http://davestevens.github.io/slack-message-builder/ to help craft your message.
+func (api *Client) PostEphemeral(channelID, userID string, options ...MsgOption) (string, error) {
+ return api.PostEphemeralContext(context.Background(), channelID, userID, options...)
+}
+
+// PostEphemeralContext sends an ephemeral message to a user in a channel with a custom context
+// For more details, see PostEphemeral documentation
+func (api *Client) PostEphemeralContext(ctx context.Context, channelID, userID string, options ...MsgOption) (timestamp string, err error) {
+ _, timestamp, _, err = api.SendMessageContext(
+ ctx,
+ channelID,
+ MsgOptionPostEphemeral(userID),
+ MsgOptionCompose(options...),
+ )
+ return timestamp, err
+}
+
+// UpdateMessage updates a message in a channel
+func (api *Client) UpdateMessage(channelID, timestamp string, options ...MsgOption) (string, string, string, error) {
+ return api.UpdateMessageContext(context.Background(), channelID, timestamp, options...)
+}
+
+// UpdateMessageContext updates a message in a channel
+func (api *Client) UpdateMessageContext(ctx context.Context, channelID, timestamp string, options ...MsgOption) (string, string, string, error) {
+ return api.SendMessageContext(
+ ctx,
+ channelID,
+ MsgOptionUpdate(timestamp),
+ MsgOptionCompose(options...),
+ )
+}
+
+// UnfurlMessage unfurls a message in a channel
+func (api *Client) UnfurlMessage(channelID, timestamp string, unfurls map[string]Attachment, options ...MsgOption) (string, string, string, error) {
+ return api.UnfurlMessageContext(context.Background(), channelID, timestamp, unfurls, options...)
+}
+
+// UnfurlMessageContext unfurls a message in a channel with a custom context
+func (api *Client) UnfurlMessageContext(ctx context.Context, channelID, timestamp string, unfurls map[string]Attachment, options ...MsgOption) (string, string, string, error) {
+ return api.SendMessageContext(ctx, channelID, MsgOptionUnfurl(timestamp, unfurls), MsgOptionCompose(options...))
+}
+
+// UnfurlMessageWithAuthURL sends an unfurl request containing an
+// authentication URL.
+// For more details see:
+// https://api.slack.com/reference/messaging/link-unfurling#authenticated_unfurls
+func (api *Client) UnfurlMessageWithAuthURL(channelID, timestamp string, userAuthURL string, options ...MsgOption) (string, string, string, error) {
+ return api.UnfurlMessageWithAuthURLContext(context.Background(), channelID, timestamp, userAuthURL, options...)
+}
+
+// UnfurlMessageWithAuthURLContext sends an unfurl request containing an
+// authentication URL.
+// For more details see:
+// https://api.slack.com/reference/messaging/link-unfurling#authenticated_unfurls
+func (api *Client) UnfurlMessageWithAuthURLContext(ctx context.Context, channelID, timestamp string, userAuthURL string, options ...MsgOption) (string, string, string, error) {
+ return api.SendMessageContext(ctx, channelID, MsgOptionUnfurlAuthURL(timestamp, userAuthURL), MsgOptionCompose(options...))
+}
+
+// SendMessage is a more flexible method for configuring messages.
+func (api *Client) SendMessage(channel string, options ...MsgOption) (string, string, string, error) {
+ return api.SendMessageContext(context.Background(), channel, options...)
+}
+
+// SendMessageContext is a more flexible method for configuring messages with a custom context.
+func (api *Client) SendMessageContext(ctx context.Context, channelID string, options ...MsgOption) (_channel string, _timestamp string, _text string, err error) {
+ var (
+ req *http.Request
+ parser func(*chatResponseFull) responseParser
+ response chatResponseFull
+ )
+
+ if req, parser, err = buildSender(api.endpoint, options...).BuildRequestContext(ctx, api.token, channelID); err != nil {
+ return "", "", "", err
+ }
+
+ if api.Debug() {
+ reqBody, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ return "", "", "", err
+ }
+ req.Body = ioutil.NopCloser(bytes.NewBuffer(reqBody))
+ api.Debugf("Sending request: %s", redactToken(reqBody))
+ }
+
+ if err = doPost(ctx, api.httpclient, req, parser(&response), api); err != nil {
+ return "", "", "", err
+ }
+
+ return response.Channel, response.getMessageTimestamp(), response.Text, response.Err()
+}
+
+func redactToken(b []byte) []byte {
+ // See https://api.slack.com/authentication/token-types
+ // and https://api.slack.com/authentication/rotation
+ re, err := regexp.Compile(`(token=x[a-z.]+)-[0-9A-Za-z-]+`)
+ if err != nil {
+ // The regular expression above should never result in errors,
+ // but just in case, do no harm.
+ return b
+ }
+ // Keep "token=" and the first element of the token, which identifies its type
+ // (this could be useful for debugging, e.g. when using a wrong token).
+ return re.ReplaceAll(b, []byte("$1-REDACTED"))
+}
+
+// UnsafeApplyMsgOptions is a utility function for debugging/testing chat requests.
+// NOTE: USE AT YOUR OWN RISK: No issues relating to the use of this function
+// will be supported by the library.
+func UnsafeApplyMsgOptions(token, channel, apiurl string, options ...MsgOption) (string, url.Values, error) {
+ config, err := applyMsgOptions(token, channel, apiurl, options...)
+ return config.endpoint, config.values, err
+}
+
+func applyMsgOptions(token, channel, apiurl string, options ...MsgOption) (sendConfig, error) {
+ config := sendConfig{
+ apiurl: apiurl,
+ endpoint: apiurl + string(chatPostMessage),
+ values: url.Values{
+ "token": {token},
+ "channel": {channel},
+ },
+ }
+
+ for _, opt := range options {
+ if err := opt(&config); err != nil {
+ return config, err
+ }
+ }
+
+ return config, nil
+}
+
+func buildSender(apiurl string, options ...MsgOption) sendConfig {
+ return sendConfig{
+ apiurl: apiurl,
+ options: options,
+ }
+}
+
+type sendMode string
+
+const (
+ chatUpdate sendMode = "chat.update"
+ chatPostMessage sendMode = "chat.postMessage"
+ chatScheduleMessage sendMode = "chat.scheduleMessage"
+ chatDelete sendMode = "chat.delete"
+ chatPostEphemeral sendMode = "chat.postEphemeral"
+ chatResponse sendMode = "chat.responseURL"
+ chatMeMessage sendMode = "chat.meMessage"
+ chatUnfurl sendMode = "chat.unfurl"
+)
+
+type sendConfig struct {
+ apiurl string
+ options []MsgOption
+ mode sendMode
+ endpoint string
+ values url.Values
+ attachments []Attachment
+ metadata SlackMetadata
+ blocks Blocks
+ responseType string
+ replaceOriginal bool
+ deleteOriginal bool
+}
+
+func (t sendConfig) BuildRequest(token, channelID string) (req *http.Request, _ func(*chatResponseFull) responseParser, err error) {
+ return t.BuildRequestContext(context.Background(), token, channelID)
+}
+
+func (t sendConfig) BuildRequestContext(ctx context.Context, token, channelID string) (req *http.Request, _ func(*chatResponseFull) responseParser, err error) {
+ if t, err = applyMsgOptions(token, channelID, t.apiurl, t.options...); err != nil {
+ return nil, nil, err
+ }
+
+ switch t.mode {
+ case chatResponse:
+ return responseURLSender{
+ endpoint: t.endpoint,
+ values: t.values,
+ attachments: t.attachments,
+ metadata: t.metadata,
+ blocks: t.blocks,
+ responseType: t.responseType,
+ replaceOriginal: t.replaceOriginal,
+ deleteOriginal: t.deleteOriginal,
+ }.BuildRequestContext(ctx)
+ default:
+ return formSender{endpoint: t.endpoint, values: t.values}.BuildRequestContext(ctx)
+ }
+}
+
+type formSender struct {
+ endpoint string
+ values url.Values
+}
+
+func (t formSender) BuildRequest() (*http.Request, func(*chatResponseFull) responseParser, error) {
+ return t.BuildRequestContext(context.Background())
+}
+
+func (t formSender) BuildRequestContext(ctx context.Context) (*http.Request, func(*chatResponseFull) responseParser, error) {
+ req, err := formReq(ctx, t.endpoint, t.values)
+ return req, func(resp *chatResponseFull) responseParser {
+ return newJSONParser(resp)
+ }, err
+}
+
+type responseURLSender struct {
+ endpoint string
+ values url.Values
+ attachments []Attachment
+ metadata SlackMetadata
+ blocks Blocks
+ responseType string
+ replaceOriginal bool
+ deleteOriginal bool
+}
+
+func (t responseURLSender) BuildRequest() (*http.Request, func(*chatResponseFull) responseParser, error) {
+ return t.BuildRequestContext(context.Background())
+}
+
+func (t responseURLSender) BuildRequestContext(ctx context.Context) (*http.Request, func(*chatResponseFull) responseParser, error) {
+ req, err := jsonReq(ctx, t.endpoint, Msg{
+ Text: t.values.Get("text"),
+ Timestamp: t.values.Get("ts"),
+ Attachments: t.attachments,
+ Blocks: t.blocks,
+ Metadata: t.metadata,
+ ResponseType: t.responseType,
+ ReplaceOriginal: t.replaceOriginal,
+ DeleteOriginal: t.deleteOriginal,
+ })
+ return req, func(resp *chatResponseFull) responseParser {
+ return newContentTypeParser(resp)
+ }, err
+}
+
+// MsgOption option provided when sending a message.
+type MsgOption func(*sendConfig) error
+
+// MsgOptionSchedule schedules a message.
+func MsgOptionSchedule(postAt string) MsgOption {
+ return func(config *sendConfig) error {
+ config.endpoint = config.apiurl + string(chatScheduleMessage)
+ config.values.Add("post_at", postAt)
+ return nil
+ }
+}
+
+// MsgOptionPost posts a message; this is the default.
+func MsgOptionPost() MsgOption {
+ return func(config *sendConfig) error {
+ config.endpoint = config.apiurl + string(chatPostMessage)
+ config.values.Del("ts")
+ return nil
+ }
+}
+
+// MsgOptionPostEphemeral posts an ephemeral message to the provided user.
+func MsgOptionPostEphemeral(userID string) MsgOption {
+ return func(config *sendConfig) error {
+ config.endpoint = config.apiurl + string(chatPostEphemeral)
+ MsgOptionUser(userID)(config)
+ config.values.Del("ts")
+
+ return nil
+ }
+}
+
+// MsgOptionMeMessage posts a "me message" type from the calling user
+func MsgOptionMeMessage() MsgOption {
+ return func(config *sendConfig) error {
+ config.endpoint = config.apiurl + string(chatMeMessage)
+ return nil
+ }
+}
+
+// MsgOptionUpdate updates a message based on the timestamp.
+func MsgOptionUpdate(timestamp string) MsgOption {
+ return func(config *sendConfig) error {
+ config.endpoint = config.apiurl + string(chatUpdate)
+ config.values.Add("ts", timestamp)
+ return nil
+ }
+}
+
+// MsgOptionDelete deletes a message based on the timestamp.
+func MsgOptionDelete(timestamp string) MsgOption {
+ return func(config *sendConfig) error {
+ config.endpoint = config.apiurl + string(chatDelete)
+ config.values.Add("ts", timestamp)
+ return nil
+ }
+}
+
+// MsgOptionUnfurl unfurls a message based on the timestamp.
+func MsgOptionUnfurl(timestamp string, unfurls map[string]Attachment) MsgOption {
+ return func(config *sendConfig) error {
+ config.endpoint = config.apiurl + string(chatUnfurl)
+ config.values.Add("ts", timestamp)
+ unfurlsStr, err := json.Marshal(unfurls)
+ if err == nil {
+ config.values.Add("unfurls", string(unfurlsStr))
+ }
+ return err
+ }
+}
+
+// MsgOptionUnfurlAuthURL unfurls a message using an auth url based on the timestamp.
+func MsgOptionUnfurlAuthURL(timestamp string, userAuthURL string) MsgOption {
+ return func(config *sendConfig) error {
+ config.endpoint = config.apiurl + string(chatUnfurl)
+ config.values.Add("ts", timestamp)
+ config.values.Add("user_auth_url", userAuthURL)
+ return nil
+ }
+}
+
+// MsgOptionUnfurlAuthRequired requests that the user installs the
+// Slack app for unfurling.
+func MsgOptionUnfurlAuthRequired(timestamp string) MsgOption {
+ return func(config *sendConfig) error {
+ config.endpoint = config.apiurl + string(chatUnfurl)
+ config.values.Add("ts", timestamp)
+ config.values.Add("user_auth_required", "true")
+ return nil
+ }
+}
+
+// MsgOptionUnfurlAuthMessage attaches a message inviting the user to
+// authenticate.
+func MsgOptionUnfurlAuthMessage(timestamp string, msg string) MsgOption {
+ return func(config *sendConfig) error {
+ config.endpoint = config.apiurl + string(chatUnfurl)
+ config.values.Add("ts", timestamp)
+ config.values.Add("user_auth_message", msg)
+ return nil
+ }
+}
+
+// MsgOptionResponseURL supplies a url to use as the endpoint.
+func MsgOptionResponseURL(url string, responseType string) MsgOption {
+ return func(config *sendConfig) error {
+ config.mode = chatResponse
+ config.endpoint = url
+ config.responseType = responseType
+ config.values.Del("ts")
+ return nil
+ }
+}
+
+// MsgOptionReplaceOriginal replaces original message with response url
+func MsgOptionReplaceOriginal(responseURL string) MsgOption {
+ return func(config *sendConfig) error {
+ config.mode = chatResponse
+ config.endpoint = responseURL
+ config.replaceOriginal = true
+ return nil
+ }
+}
+
+// MsgOptionDeleteOriginal deletes original message with response url
+func MsgOptionDeleteOriginal(responseURL string) MsgOption {
+ return func(config *sendConfig) error {
+ config.mode = chatResponse
+ config.endpoint = responseURL
+ config.deleteOriginal = true
+ return nil
+ }
+}
+
+// MsgOptionAsUser sets whether or not to send the message as the user.
+func MsgOptionAsUser(b bool) MsgOption {
+ return func(config *sendConfig) error {
+ if b != DEFAULT_MESSAGE_ASUSER {
+ config.values.Set("as_user", "true")
+ }
+ return nil
+ }
+}
+
+// MsgOptionUser sets the user for the message.
+func MsgOptionUser(userID string) MsgOption {
+ return func(config *sendConfig) error {
+ config.values.Set("user", userID)
+ return nil
+ }
+}
+
+// MsgOptionUsername sets the username for the message.
+func MsgOptionUsername(username string) MsgOption {
+ return func(config *sendConfig) error {
+ config.values.Set("username", username)
+ return nil
+ }
+}
+
+// MsgOptionText provides the text for the message, optionally escaping the provided
+// text.
+func MsgOptionText(text string, escape bool) MsgOption {
+ return func(config *sendConfig) error {
+ if escape {
+ text = slackutilsx.EscapeMessage(text)
+ }
+ config.values.Add("text", text)
+ return nil
+ }
+}
+
+// MsgOptionAttachments provides attachments for the message.
+func MsgOptionAttachments(attachments ...Attachment) MsgOption {
+ return func(config *sendConfig) error {
+ if attachments == nil {
+ return nil
+ }
+
+ config.attachments = attachments
+
+ // FIXME: We are setting the attachments on the message twice: above for
+ // the json version, and below for the html version. The marshalled bytes
+ // we put into config.values below don't work directly in the Msg version.
+
+ attachmentBytes, err := json.Marshal(attachments)
+ if err == nil {
+ config.values.Set("attachments", string(attachmentBytes))
+ }
+
+ return err
+ }
+}
+
+// MsgOptionBlocks sets blocks for the message
+func MsgOptionBlocks(blocks ...Block) MsgOption {
+ return func(config *sendConfig) error {
+ if blocks == nil {
+ return nil
+ }
+
+ config.blocks.BlockSet = append(config.blocks.BlockSet, blocks...)
+
+ blocks, err := json.Marshal(blocks)
+ if err == nil {
+ config.values.Set("blocks", string(blocks))
+ }
+ return err
+ }
+}
+
+// MsgOptionEnableLinkUnfurl enables link unfurling
+func MsgOptionEnableLinkUnfurl() MsgOption {
+ return func(config *sendConfig) error {
+ config.values.Set("unfurl_links", "true")
+ return nil
+ }
+}
+
+// MsgOptionDisableLinkUnfurl disables link unfurling
+func MsgOptionDisableLinkUnfurl() MsgOption {
+ return func(config *sendConfig) error {
+ config.values.Set("unfurl_links", "false")
+ return nil
+ }
+}
+
+// MsgOptionDisableMediaUnfurl disables media unfurling.
+func MsgOptionDisableMediaUnfurl() MsgOption {
+ return func(config *sendConfig) error {
+ config.values.Set("unfurl_media", "false")
+ return nil
+ }
+}
+
+// MsgOptionDisableMarkdown disables markdown.
+func MsgOptionDisableMarkdown() MsgOption {
+ return func(config *sendConfig) error {
+ config.values.Set("mrkdwn", "false")
+ return nil
+ }
+}
+
+// MsgOptionTS sets the thread TS of the message to enable creating or replying to a thread
+func MsgOptionTS(ts string) MsgOption {
+ return func(config *sendConfig) error {
+ config.values.Set("thread_ts", ts)
+ return nil
+ }
+}
+
+// MsgOptionBroadcast sets reply_broadcast to true
+func MsgOptionBroadcast() MsgOption {
+ return func(config *sendConfig) error {
+ config.values.Set("reply_broadcast", "true")
+ return nil
+ }
+}
+
+// MsgOptionCompose combines multiple options into a single option.
+func MsgOptionCompose(options ...MsgOption) MsgOption {
+ return func(config *sendConfig) error {
+ for _, opt := range options {
+ if err := opt(config); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+// MsgOptionParse sets the parse option.
+func MsgOptionParse(b bool) MsgOption {
+ return func(config *sendConfig) error {
+ var v string
+ if b {
+ v = "full"
+ } else {
+ v = "none"
+ }
+ config.values.Set("parse", v)
+ return nil
+ }
+}
+
+// MsgOptionIconURL sets an icon URL
+func MsgOptionIconURL(iconURL string) MsgOption {
+ return func(config *sendConfig) error {
+ config.values.Set("icon_url", iconURL)
+ return nil
+ }
+}
+
+// MsgOptionIconEmoji sets an icon emoji
+func MsgOptionIconEmoji(iconEmoji string) MsgOption {
+ return func(config *sendConfig) error {
+ config.values.Set("icon_emoji", iconEmoji)
+ return nil
+ }
+}
+
+// MsgOptionMetadata sets message metadata
+func MsgOptionMetadata(metadata SlackMetadata) MsgOption {
+ return func(config *sendConfig) error {
+ config.metadata = metadata
+ meta, err := json.Marshal(metadata)
+ if err == nil {
+ config.values.Set("metadata", string(meta))
+ }
+ return err
+ }
+}
+
+// UnsafeMsgOptionEndpoint delivers the message to the specified endpoint.
+// NOTE: USE AT YOUR OWN RISK: No issues relating to the use of this Option
+// will be supported by the library; it is subject to change without notice, which
+// may result in compilation errors or runtime behaviour changes.
+func UnsafeMsgOptionEndpoint(endpoint string, update func(url.Values)) MsgOption {
+ return func(config *sendConfig) error {
+ config.endpoint = endpoint
+ update(config.values)
+ return nil
+ }
+}
+
+// MsgOptionPostMessageParameters maintains backwards compatibility.
+func MsgOptionPostMessageParameters(params PostMessageParameters) MsgOption {
+ return func(config *sendConfig) error {
+ if params.Username != DEFAULT_MESSAGE_USERNAME {
+ config.values.Set("username", params.Username)
+ }
+
+ // chat.postEphemeral support
+ if params.User != DEFAULT_MESSAGE_USERNAME {
+ config.values.Set("user", params.User)
+ }
+
+ // never generates an error.
+ MsgOptionAsUser(params.AsUser)(config)
+
+ if params.Parse != DEFAULT_MESSAGE_PARSE {
+ config.values.Set("parse", params.Parse)
+ }
+ if params.LinkNames != DEFAULT_MESSAGE_LINK_NAMES {
+ config.values.Set("link_names", "1")
+ }
+
+ if params.UnfurlLinks != DEFAULT_MESSAGE_UNFURL_LINKS {
+ config.values.Set("unfurl_links", "true")
+ }
+
+ // Send the message with an explicit `as_user` of `true` and `unfurl_links` of `false` in the request,
+ // because setting `as_user` to `true` changes the default value of `unfurl_links` to `true` on the Slack API side.
+ if params.AsUser != DEFAULT_MESSAGE_ASUSER && params.UnfurlLinks == DEFAULT_MESSAGE_UNFURL_LINKS {
+ config.values.Set("unfurl_links", "false")
+ }
+ if params.UnfurlMedia != DEFAULT_MESSAGE_UNFURL_MEDIA {
+ config.values.Set("unfurl_media", "false")
+ }
+ if params.IconURL != DEFAULT_MESSAGE_ICON_URL {
+ config.values.Set("icon_url", params.IconURL)
+ }
+ if params.IconEmoji != DEFAULT_MESSAGE_ICON_EMOJI {
+ config.values.Set("icon_emoji", params.IconEmoji)
+ }
+ if params.Markdown != DEFAULT_MESSAGE_MARKDOWN {
+ config.values.Set("mrkdwn", "false")
+ }
+
+ if params.ThreadTimestamp != DEFAULT_MESSAGE_THREAD_TIMESTAMP {
+ config.values.Set("thread_ts", params.ThreadTimestamp)
+ }
+ if params.ReplyBroadcast != DEFAULT_MESSAGE_REPLY_BROADCAST {
+ config.values.Set("reply_broadcast", "true")
+ }
+
+ return nil
+ }
+}
+
+// PermalinkParameters are the parameters required to get a permalink to a
+// message. Slack documentation can be found here:
+// https://api.slack.com/methods/chat.getPermalink
+type PermalinkParameters struct {
+ Channel string
+ Ts string
+}
+
+// GetPermalink returns the permalink for a message. It takes
+// PermalinkParameters and returns a string containing the permalink. It
+// returns an error if unable to retrieve the permalink.
+func (api *Client) GetPermalink(params *PermalinkParameters) (string, error) {
+ return api.GetPermalinkContext(context.Background(), params)
+}
+
+// GetPermalinkContext returns the permalink for a message using a custom context.
+func (api *Client) GetPermalinkContext(ctx context.Context, params *PermalinkParameters) (string, error) {
+ values := url.Values{
+ "channel": {params.Channel},
+ "message_ts": {params.Ts},
+ }
+
+ response := struct {
+ Channel string `json:"channel"`
+ Permalink string `json:"permalink"`
+ SlackResponse
+ }{}
+ err := api.getMethod(ctx, "chat.getPermalink", api.token, values, &response)
+ if err != nil {
+ return "", err
+ }
+ return response.Permalink, response.Err()
+}
+
+type GetScheduledMessagesParameters struct {
+ Channel string
+ Cursor string
+ Latest string
+ Limit int
+ Oldest string
+}
+
+// GetScheduledMessages returns the list of scheduled messages based on params
+func (api *Client) GetScheduledMessages(params *GetScheduledMessagesParameters) (channels []ScheduledMessage, nextCursor string, err error) {
+ return api.GetScheduledMessagesContext(context.Background(), params)
+}
+
+// GetScheduledMessagesContext returns the list of scheduled messages in a Slack team with a custom context
+func (api *Client) GetScheduledMessagesContext(ctx context.Context, params *GetScheduledMessagesParameters) (channels []ScheduledMessage, nextCursor string, err error) {
+ values := url.Values{
+ "token": {api.token},
+ }
+ if params.Channel != "" {
+ values.Add("channel", params.Channel)
+ }
+ if params.Cursor != "" {
+ values.Add("cursor", params.Cursor)
+ }
+ if params.Limit != 0 {
+ values.Add("limit", strconv.Itoa(params.Limit))
+ }
+ if params.Latest != "" {
+ values.Add("latest", params.Latest)
+ }
+ if params.Oldest != "" {
+ values.Add("oldest", params.Oldest)
+ }
+ response := struct {
+ Messages []ScheduledMessage `json:"scheduled_messages"`
+ ResponseMetaData responseMetaData `json:"response_metadata"`
+ SlackResponse
+ }{}
+
+ err = api.postMethod(ctx, "chat.scheduledMessages.list", values, &response)
+ if err != nil {
+ return nil, "", err
+ }
+
+ return response.Messages, response.ResponseMetaData.NextCursor, response.Err()
+}
+
+type DeleteScheduledMessageParameters struct {
+ Channel string
+ ScheduledMessageID string
+ AsUser bool
+}
+
+// DeleteScheduledMessage deletes a pending scheduled message based on params
+func (api *Client) DeleteScheduledMessage(params *DeleteScheduledMessageParameters) (bool, error) {
+ return api.DeleteScheduledMessageContext(context.Background(), params)
+}
+
+// DeleteScheduledMessageContext deletes a pending scheduled message with a custom context
+func (api *Client) DeleteScheduledMessageContext(ctx context.Context, params *DeleteScheduledMessageParameters) (bool, error) {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {params.Channel},
+ "scheduled_message_id": {params.ScheduledMessageID},
+ "as_user": {strconv.FormatBool(params.AsUser)},
+ }
+ response := struct {
+ SlackResponse
+ }{}
+
+ err := api.postMethod(ctx, "chat.deleteScheduledMessage", values, &response)
+ if err != nil {
+ return false, err
+ }
+
+ return response.Ok, response.Err()
+}
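To show how the MsgOption senders above compose, a hedged usage sketch: post a message, then reply in the resulting thread and broadcast the reply. slack.New is assumed from the library's client code; the token and channel ID are placeholders.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/slack-go/slack"
)

func main() {
	api := slack.New(os.Getenv("SLACK_BOT_TOKEN"))

	// chat.postMessage with escaped plain text and link unfurling disabled.
	channel, ts, err := api.PostMessage(
		"C0123456789",
		slack.MsgOptionText("build finished", true),
		slack.MsgOptionDisableLinkUnfurl(),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Reply in the thread created above and broadcast the reply to the channel.
	if _, _, err := api.PostMessage(
		channel,
		slack.MsgOptionText("details in thread", true),
		slack.MsgOptionTS(ts),
		slack.MsgOptionBroadcast(),
	); err != nil {
		log.Fatal(err)
	}
	fmt.Println("posted at", ts)
}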
diff --git a/vendor/github.com/slack-go/slack/comment.go b/vendor/github.com/slack-go/slack/comment.go
new file mode 100644
index 0000000..7d1c0d4
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/comment.go
@@ -0,0 +1,10 @@
+package slack
+
+// Comment contains all the information relative to a comment
+type Comment struct {
+ ID string `json:"id,omitempty"`
+ Created JSONTime `json:"created,omitempty"`
+ Timestamp JSONTime `json:"timestamp,omitempty"`
+ User string `json:"user,omitempty"`
+ Comment string `json:"comment,omitempty"`
+}
diff --git a/vendor/github.com/slack-go/slack/conversation.go b/vendor/github.com/slack-go/slack/conversation.go
new file mode 100644
index 0000000..1a63c53
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/conversation.go
@@ -0,0 +1,739 @@
+package slack
+
+import (
+ "context"
+ "errors"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+// Conversation is the foundation for IM and BaseGroupConversation
+type Conversation struct {
+ ID string `json:"id"`
+ Created JSONTime `json:"created"`
+ IsOpen bool `json:"is_open"`
+ LastRead string `json:"last_read,omitempty"`
+ Latest *Message `json:"latest,omitempty"`
+ UnreadCount int `json:"unread_count,omitempty"`
+ UnreadCountDisplay int `json:"unread_count_display,omitempty"`
+ IsGroup bool `json:"is_group"`
+ IsShared bool `json:"is_shared"`
+ IsIM bool `json:"is_im"`
+ IsExtShared bool `json:"is_ext_shared"`
+ IsOrgShared bool `json:"is_org_shared"`
+ IsPendingExtShared bool `json:"is_pending_ext_shared"`
+ IsPrivate bool `json:"is_private"`
+ IsMpIM bool `json:"is_mpim"`
+ Unlinked int `json:"unlinked"`
+ NameNormalized string `json:"name_normalized"`
+ NumMembers int `json:"num_members"`
+ Priority float64 `json:"priority"`
+ User string `json:"user"`
+
+ // TODO support pending_shared
+ // TODO support previous_names
+}
+
+// GroupConversation is the foundation for Group and Channel
+type GroupConversation struct {
+ Conversation
+ Name string `json:"name"`
+ Creator string `json:"creator"`
+ IsArchived bool `json:"is_archived"`
+ Members []string `json:"members"`
+ Topic Topic `json:"topic"`
+ Purpose Purpose `json:"purpose"`
+}
+
+// Topic contains information about the topic
+type Topic struct {
+ Value string `json:"value"`
+ Creator string `json:"creator"`
+ LastSet JSONTime `json:"last_set"`
+}
+
+// Purpose contains information about the purpose
+type Purpose struct {
+ Value string `json:"value"`
+ Creator string `json:"creator"`
+ LastSet JSONTime `json:"last_set"`
+}
+
+type GetUsersInConversationParameters struct {
+ ChannelID string
+ Cursor string
+ Limit int
+}
+
+type GetConversationsForUserParameters struct {
+ UserID string
+ Cursor string
+ Types []string
+ Limit int
+ ExcludeArchived bool
+ TeamID string
+}
+
+type responseMetaData struct {
+ NextCursor string `json:"next_cursor"`
+}
+
+// GetUsersInConversation returns the list of users in a conversation
+func (api *Client) GetUsersInConversation(params *GetUsersInConversationParameters) ([]string, string, error) {
+ return api.GetUsersInConversationContext(context.Background(), params)
+}
+
+// GetUsersInConversationContext returns the list of users in a conversation with a custom context
+func (api *Client) GetUsersInConversationContext(ctx context.Context, params *GetUsersInConversationParameters) ([]string, string, error) {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {params.ChannelID},
+ }
+ if params.Cursor != "" {
+ values.Add("cursor", params.Cursor)
+ }
+ if params.Limit != 0 {
+ values.Add("limit", strconv.Itoa(params.Limit))
+ }
+ response := struct {
+ Members []string `json:"members"`
+ ResponseMetaData responseMetaData `json:"response_metadata"`
+ SlackResponse
+ }{}
+
+ err := api.postMethod(ctx, "conversations.members", values, &response)
+ if err != nil {
+ return nil, "", err
+ }
+
+ if err := response.Err(); err != nil {
+ return nil, "", err
+ }
+
+ return response.Members, response.ResponseMetaData.NextCursor, nil
+}
+
+// GetConversationsForUser returns the list of conversations for a given user
+func (api *Client) GetConversationsForUser(params *GetConversationsForUserParameters) (channels []Channel, nextCursor string, err error) {
+ return api.GetConversationsForUserContext(context.Background(), params)
+}
+
+// GetConversationsForUserContext returns the list of conversations for a given user with a custom context
+func (api *Client) GetConversationsForUserContext(ctx context.Context, params *GetConversationsForUserParameters) (channels []Channel, nextCursor string, err error) {
+ values := url.Values{
+ "token": {api.token},
+ }
+ if params.UserID != "" {
+ values.Add("user", params.UserID)
+ }
+ if params.Cursor != "" {
+ values.Add("cursor", params.Cursor)
+ }
+ if params.Limit != 0 {
+ values.Add("limit", strconv.Itoa(params.Limit))
+ }
+ if params.Types != nil {
+ values.Add("types", strings.Join(params.Types, ","))
+ }
+ if params.ExcludeArchived {
+ values.Add("exclude_archived", "true")
+ }
+ if params.TeamID != "" {
+ values.Add("team_id", params.TeamID)
+ }
+
+ response := struct {
+ Channels []Channel `json:"channels"`
+ ResponseMetaData responseMetaData `json:"response_metadata"`
+ SlackResponse
+ }{}
+ err = api.postMethod(ctx, "users.conversations", values, &response)
+ if err != nil {
+ return nil, "", err
+ }
+
+ return response.Channels, response.ResponseMetaData.NextCursor, response.Err()
+}
+
+// ArchiveConversation archives a conversation
+func (api *Client) ArchiveConversation(channelID string) error {
+ return api.ArchiveConversationContext(context.Background(), channelID)
+}
+
+// ArchiveConversationContext archives a conversation with a custom context
+func (api *Client) ArchiveConversationContext(ctx context.Context, channelID string) error {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {channelID},
+ }
+
+ response := SlackResponse{}
+ err := api.postMethod(ctx, "conversations.archive", values, &response)
+ if err != nil {
+ return err
+ }
+
+ return response.Err()
+}
+
+// UnArchiveConversation reverses conversation archival
+func (api *Client) UnArchiveConversation(channelID string) error {
+ return api.UnArchiveConversationContext(context.Background(), channelID)
+}
+
+// UnArchiveConversationContext reverses conversation archival with a custom context
+func (api *Client) UnArchiveConversationContext(ctx context.Context, channelID string) error {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {channelID},
+ }
+ response := SlackResponse{}
+ err := api.postMethod(ctx, "conversations.unarchive", values, &response)
+ if err != nil {
+ return err
+ }
+
+ return response.Err()
+}
+
+// SetTopicOfConversation sets the topic for a conversation
+func (api *Client) SetTopicOfConversation(channelID, topic string) (*Channel, error) {
+ return api.SetTopicOfConversationContext(context.Background(), channelID, topic)
+}
+
+// SetTopicOfConversationContext sets the topic for a conversation with a custom context
+func (api *Client) SetTopicOfConversationContext(ctx context.Context, channelID, topic string) (*Channel, error) {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {channelID},
+ "topic": {topic},
+ }
+ response := struct {
+ SlackResponse
+ Channel *Channel `json:"channel"`
+ }{}
+ err := api.postMethod(ctx, "conversations.setTopic", values, &response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Channel, response.Err()
+}
+
+// SetPurposeOfConversation sets the purpose for a conversation
+func (api *Client) SetPurposeOfConversation(channelID, purpose string) (*Channel, error) {
+ return api.SetPurposeOfConversationContext(context.Background(), channelID, purpose)
+}
+
+// SetPurposeOfConversationContext sets the purpose for a conversation with a custom context
+func (api *Client) SetPurposeOfConversationContext(ctx context.Context, channelID, purpose string) (*Channel, error) {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {channelID},
+ "purpose": {purpose},
+ }
+ response := struct {
+ SlackResponse
+ Channel *Channel `json:"channel"`
+ }{}
+
+ err := api.postMethod(ctx, "conversations.setPurpose", values, &response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Channel, response.Err()
+}
+
+// RenameConversation renames a conversation
+func (api *Client) RenameConversation(channelID, channelName string) (*Channel, error) {
+ return api.RenameConversationContext(context.Background(), channelID, channelName)
+}
+
+// RenameConversationContext renames a conversation with a custom context
+func (api *Client) RenameConversationContext(ctx context.Context, channelID, channelName string) (*Channel, error) {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {channelID},
+ "name": {channelName},
+ }
+ response := struct {
+ SlackResponse
+ Channel *Channel `json:"channel"`
+ }{}
+
+ err := api.postMethod(ctx, "conversations.rename", values, &response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Channel, response.Err()
+}
+
+// InviteUsersToConversation invites users to a channel
+func (api *Client) InviteUsersToConversation(channelID string, users ...string) (*Channel, error) {
+ return api.InviteUsersToConversationContext(context.Background(), channelID, users...)
+}
+
+// InviteUsersToConversationContext invites users to a channel with a custom context
+func (api *Client) InviteUsersToConversationContext(ctx context.Context, channelID string, users ...string) (*Channel, error) {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {channelID},
+ "users": {strings.Join(users, ",")},
+ }
+ response := struct {
+ SlackResponse
+ Channel *Channel `json:"channel"`
+ }{}
+
+ err := api.postMethod(ctx, "conversations.invite", values, &response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response.Channel, response.Err()
+}
+
+// InviteSharedEmailsToConversation invites users to a shared channel by email
+func (api *Client) InviteSharedEmailsToConversation(channelID string, emails ...string) (string, bool, error) {
+ return api.inviteSharedToConversationHelper(context.Background(), channelID, emails, nil)
+}
+
+// InviteSharedEmailsToConversationContext invites users to a shared channel by email with a custom context
+func (api *Client) InviteSharedEmailsToConversationContext(ctx context.Context, channelID string, emails ...string) (string, bool, error) {
+ return api.inviteSharedToConversationHelper(ctx, channelID, emails, nil)
+}
+
+// InviteSharedUserIDsToConversation invites users to a shared channel by user ID
+func (api *Client) InviteSharedUserIDsToConversation(channelID string, userIDs ...string) (string, bool, error) {
+ return api.inviteSharedToConversationHelper(context.Background(), channelID, nil, userIDs)
+}
+
+// InviteSharedUserIDsToConversationContext invites users to a shared channel by user ID with a custom context
+func (api *Client) InviteSharedUserIDsToConversationContext(ctx context.Context, channelID string, userIDs ...string) (string, bool, error) {
+ return api.inviteSharedToConversationHelper(ctx, channelID, nil, userIDs)
+}
+
+// inviteSharedToConversationHelper invites emails or userIDs to a channel with a custom context.
+// This is a helper function for InviteSharedEmailsToConversation and InviteSharedUserIDsToConversation.
+// It accepts either emails or userIDs, but not both.
+func (api *Client) inviteSharedToConversationHelper(ctx context.Context, channelID string, emails []string, userIDs []string) (string, bool, error) {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {channelID},
+ }
+ if len(emails) > 0 {
+ values.Add("emails", strings.Join(emails, ","))
+ } else if len(userIDs) > 0 {
+ values.Add("user_ids", strings.Join(userIDs, ","))
+ }
+ response := struct {
+ SlackResponse
+ InviteID string `json:"invite_id"`
+ IsLegacySharedChannel bool `json:"is_legacy_shared_channel"`
+ }{}
+
+ err := api.postMethod(ctx, "conversations.inviteShared", values, &response)
+ if err != nil {
+ return "", false, err
+ }
+
+ return response.InviteID, response.IsLegacySharedChannel, response.Err()
+}
+
+// KickUserFromConversation removes a user from a conversation
+func (api *Client) KickUserFromConversation(channelID string, user string) error {
+ return api.KickUserFromConversationContext(context.Background(), channelID, user)
+}
+
+// KickUserFromConversationContext removes a user from a conversation with a custom context
+func (api *Client) KickUserFromConversationContext(ctx context.Context, channelID string, user string) error {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {channelID},
+ "user": {user},
+ }
+
+ response := SlackResponse{}
+ err := api.postMethod(ctx, "conversations.kick", values, &response)
+ if err != nil {
+ return err
+ }
+
+ return response.Err()
+}
+
+// CloseConversation closes a direct message or multi-person direct message
+func (api *Client) CloseConversation(channelID string) (noOp bool, alreadyClosed bool, err error) {
+ return api.CloseConversationContext(context.Background(), channelID)
+}
+
+// CloseConversationContext closes a direct message or multi-person direct message with a custom context
+func (api *Client) CloseConversationContext(ctx context.Context, channelID string) (noOp bool, alreadyClosed bool, err error) {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {channelID},
+ }
+ response := struct {
+ SlackResponse
+ NoOp bool `json:"no_op"`
+ AlreadyClosed bool `json:"already_closed"`
+ }{}
+
+ err = api.postMethod(ctx, "conversations.close", values, &response)
+ if err != nil {
+ return false, false, err
+ }
+
+ return response.NoOp, response.AlreadyClosed, response.Err()
+}
+
+type CreateConversationParams struct {
+ ChannelName string
+ IsPrivate bool
+ TeamID string
+}
+
+// CreateConversation initiates a public or private channel-based conversation
+func (api *Client) CreateConversation(params CreateConversationParams) (*Channel, error) {
+ return api.CreateConversationContext(context.Background(), params)
+}
+
+// CreateConversationContext initiates a public or private channel-based conversation with a custom context
+func (api *Client) CreateConversationContext(ctx context.Context, params CreateConversationParams) (*Channel, error) {
+ values := url.Values{
+ "token": {api.token},
+ "name": {params.ChannelName},
+ "is_private": {strconv.FormatBool(params.IsPrivate)},
+ }
+ if params.TeamID != "" {
+ values.Set("team_id", params.TeamID)
+ }
+ response, err := api.channelRequest(ctx, "conversations.create", values)
+ if err != nil {
+ return nil, err
+ }
+
+ return &response.Channel, nil
+}
+
+// GetConversationInfoInput defines the parameters of the GetConversationInfo and GetConversationInfoContext functions
+type GetConversationInfoInput struct {
+ ChannelID string
+ IncludeLocale bool
+ IncludeNumMembers bool
+}
+
+// GetConversationInfo retrieves information about a conversation
+func (api *Client) GetConversationInfo(input *GetConversationInfoInput) (*Channel, error) {
+ return api.GetConversationInfoContext(context.Background(), input)
+}
+
+// GetConversationInfoContext retrieves information about a conversation with a custom context
+func (api *Client) GetConversationInfoContext(ctx context.Context, input *GetConversationInfoInput) (*Channel, error) {
+ if input == nil {
+ return nil, errors.New("GetConversationInfoInput must not be nil")
+ }
+
+ if input.ChannelID == "" {
+ return nil, errors.New("ChannelID must be defined")
+ }
+
+ values := url.Values{
+ "token": {api.token},
+ "channel": {input.ChannelID},
+ "include_locale": {strconv.FormatBool(input.IncludeLocale)},
+ "include_num_members": {strconv.FormatBool(input.IncludeNumMembers)},
+ }
+ response, err := api.channelRequest(ctx, "conversations.info", values)
+ if err != nil {
+ return nil, err
+ }
+
+ return &response.Channel, response.Err()
+}
+
+// LeaveConversation leaves a conversation
+func (api *Client) LeaveConversation(channelID string) (bool, error) {
+ return api.LeaveConversationContext(context.Background(), channelID)
+}
+
+// LeaveConversationContext leaves a conversation with a custom context
+func (api *Client) LeaveConversationContext(ctx context.Context, channelID string) (bool, error) {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {channelID},
+ }
+
+ response, err := api.channelRequest(ctx, "conversations.leave", values)
+ if err != nil {
+ return false, err
+ }
+
+ return response.NotInChannel, err
+}
+
+type GetConversationRepliesParameters struct {
+ ChannelID string
+ Timestamp string
+ Cursor string
+ Inclusive bool
+ Latest string
+ Limit int
+ Oldest string
+ IncludeAllMetadata bool
+}
+
+// GetConversationReplies retrieves a thread of messages posted to a conversation
+func (api *Client) GetConversationReplies(params *GetConversationRepliesParameters) (msgs []Message, hasMore bool, nextCursor string, err error) {
+ return api.GetConversationRepliesContext(context.Background(), params)
+}
+
+// GetConversationRepliesContext retrieves a thread of messages posted to a conversation with a custom context
+func (api *Client) GetConversationRepliesContext(ctx context.Context, params *GetConversationRepliesParameters) (msgs []Message, hasMore bool, nextCursor string, err error) {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {params.ChannelID},
+ "ts": {params.Timestamp},
+ }
+ if params.Cursor != "" {
+ values.Add("cursor", params.Cursor)
+ }
+ if params.Latest != "" {
+ values.Add("latest", params.Latest)
+ }
+ if params.Limit != 0 {
+ values.Add("limit", strconv.Itoa(params.Limit))
+ }
+ if params.Oldest != "" {
+ values.Add("oldest", params.Oldest)
+ }
+ if params.Inclusive {
+ values.Add("inclusive", "1")
+ } else {
+ values.Add("inclusive", "0")
+ }
+ if params.IncludeAllMetadata {
+ values.Add("include_all_metadata", "1")
+ } else {
+ values.Add("include_all_metadata", "0")
+ }
+ response := struct {
+ SlackResponse
+ HasMore bool `json:"has_more"`
+ ResponseMetaData struct {
+ NextCursor string `json:"next_cursor"`
+ } `json:"response_metadata"`
+ Messages []Message `json:"messages"`
+ }{}
+
+ err = api.postMethod(ctx, "conversations.replies", values, &response)
+ if err != nil {
+ return nil, false, "", err
+ }
+
+ return response.Messages, response.HasMore, response.ResponseMetaData.NextCursor, response.Err()
+}
+
+type GetConversationsParameters struct {
+ Cursor string
+ ExcludeArchived bool
+ Limit int
+ Types []string
+ TeamID string
+}
+
+// GetConversations returns the list of channels in a Slack team
+func (api *Client) GetConversations(params *GetConversationsParameters) (channels []Channel, nextCursor string, err error) {
+ return api.GetConversationsContext(context.Background(), params)
+}
+
+// GetConversationsContext returns the list of channels in a Slack team with a custom context
+func (api *Client) GetConversationsContext(ctx context.Context, params *GetConversationsParameters) (channels []Channel, nextCursor string, err error) {
+ values := url.Values{
+ "token": {api.token},
+ }
+ if params.Cursor != "" {
+ values.Add("cursor", params.Cursor)
+ }
+ if params.Limit != 0 {
+ values.Add("limit", strconv.Itoa(params.Limit))
+ }
+ if params.Types != nil {
+ values.Add("types", strings.Join(params.Types, ","))
+ }
+ if params.ExcludeArchived {
+ values.Add("exclude_archived", strconv.FormatBool(params.ExcludeArchived))
+ }
+ if params.TeamID != "" {
+ values.Add("team_id", params.TeamID)
+ }
+
+ response := struct {
+ Channels []Channel `json:"channels"`
+ ResponseMetaData responseMetaData `json:"response_metadata"`
+ SlackResponse
+ }{}
+
+ err = api.postMethod(ctx, "conversations.list", values, &response)
+ if err != nil {
+ return nil, "", err
+ }
+
+ return response.Channels, response.ResponseMetaData.NextCursor, response.Err()
+}
+
+type OpenConversationParameters struct {
+ ChannelID string
+ ReturnIM bool
+ Users []string
+}
+
+// OpenConversation opens or resumes a direct message or multi-person direct message
+func (api *Client) OpenConversation(params *OpenConversationParameters) (*Channel, bool, bool, error) {
+ return api.OpenConversationContext(context.Background(), params)
+}
+
+// OpenConversationContext opens or resumes a direct message or multi-person direct message with a custom context
+func (api *Client) OpenConversationContext(ctx context.Context, params *OpenConversationParameters) (*Channel, bool, bool, error) {
+ values := url.Values{
+ "token": {api.token},
+ "return_im": {strconv.FormatBool(params.ReturnIM)},
+ }
+ if params.ChannelID != "" {
+ values.Add("channel", params.ChannelID)
+ }
+ if params.Users != nil {
+ values.Add("users", strings.Join(params.Users, ","))
+ }
+ response := struct {
+ Channel *Channel `json:"channel"`
+ NoOp bool `json:"no_op"`
+ AlreadyOpen bool `json:"already_open"`
+ SlackResponse
+ }{}
+
+ err := api.postMethod(ctx, "conversations.open", values, &response)
+ if err != nil {
+ return nil, false, false, err
+ }
+
+ return response.Channel, response.NoOp, response.AlreadyOpen, response.Err()
+}
+
+// JoinConversation joins an existing conversation
+func (api *Client) JoinConversation(channelID string) (*Channel, string, []string, error) {
+ return api.JoinConversationContext(context.Background(), channelID)
+}
+
+// JoinConversationContext joins an existing conversation with a custom context
+func (api *Client) JoinConversationContext(ctx context.Context, channelID string) (*Channel, string, []string, error) {
+ values := url.Values{"token": {api.token}, "channel": {channelID}}
+ response := struct {
+ Channel *Channel `json:"channel"`
+ Warning string `json:"warning"`
+ ResponseMetaData *struct {
+ Warnings []string `json:"warnings"`
+ } `json:"response_metadata"`
+ SlackResponse
+ }{}
+
+ err := api.postMethod(ctx, "conversations.join", values, &response)
+ if err != nil {
+ return nil, "", nil, err
+ }
+ if response.Err() != nil {
+ return nil, "", nil, response.Err()
+ }
+ var warnings []string
+ if response.ResponseMetaData != nil {
+ warnings = response.ResponseMetaData.Warnings
+ }
+ return response.Channel, response.Warning, warnings, nil
+}
+
+type GetConversationHistoryParameters struct {
+ ChannelID string
+ Cursor string
+ Inclusive bool
+ Latest string
+ Limit int
+ Oldest string
+ IncludeAllMetadata bool
+}
+
+type GetConversationHistoryResponse struct {
+ SlackResponse
+ HasMore bool `json:"has_more"`
+ PinCount int `json:"pin_count"`
+ Latest string `json:"latest"`
+ ResponseMetaData struct {
+ NextCursor string `json:"next_cursor"`
+ } `json:"response_metadata"`
+ Messages []Message `json:"messages"`
+}
+
+// GetConversationHistory fetches the message history of a conversation
+func (api *Client) GetConversationHistory(params *GetConversationHistoryParameters) (*GetConversationHistoryResponse, error) {
+ return api.GetConversationHistoryContext(context.Background(), params)
+}
+
+// GetConversationHistoryContext fetches the message history of a conversation with a custom context
+func (api *Client) GetConversationHistoryContext(ctx context.Context, params *GetConversationHistoryParameters) (*GetConversationHistoryResponse, error) {
+ values := url.Values{"token": {api.token}, "channel": {params.ChannelID}}
+ if params.Cursor != "" {
+ values.Add("cursor", params.Cursor)
+ }
+ if params.Inclusive {
+ values.Add("inclusive", "1")
+ } else {
+ values.Add("inclusive", "0")
+ }
+ if params.Latest != "" {
+ values.Add("latest", params.Latest)
+ }
+ if params.Limit != 0 {
+ values.Add("limit", strconv.Itoa(params.Limit))
+ }
+ if params.Oldest != "" {
+ values.Add("oldest", params.Oldest)
+ }
+ if params.IncludeAllMetadata {
+ values.Add("include_all_metadata", "1")
+ } else {
+ values.Add("include_all_metadata", "0")
+ }
+
+ response := GetConversationHistoryResponse{}
+
+ err := api.postMethod(ctx, "conversations.history", values, &response)
+ if err != nil {
+ return nil, err
+ }
+
+ return &response, response.Err()
+}
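+
+// A minimal cursor-pagination sketch (illustrative only; the channel ID is a
+// placeholder and api is an already-constructed *Client):
+//
+//    params := slack.GetConversationHistoryParameters{ChannelID: "C0123456789", Limit: 200}
+//    for {
+//        resp, err := api.GetConversationHistory(&params)
+//        if err != nil {
+//            break
+//        }
+//        // process resp.Messages here
+//        if !resp.HasMore {
+//            break
+//        }
+//        params.Cursor = resp.ResponseMetaData.NextCursor
+//    }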
+
+// MarkConversation sets the read mark of a conversation to a specific point
+func (api *Client) MarkConversation(channel, ts string) (err error) {
+ return api.MarkConversationContext(context.Background(), channel, ts)
+}
+
+// MarkConversationContext sets the read mark of a conversation to a specific point with a custom context
+func (api *Client) MarkConversationContext(ctx context.Context, channel, ts string) error {
+ values := url.Values{
+ "token": {api.token},
+ "channel": {channel},
+ "ts": {ts},
+ }
+
+ response := &SlackResponse{}
+
+ err := api.postMethod(ctx, "conversations.mark", values, response)
+ if err != nil {
+ return err
+ }
+ return response.Err()
+}
diff --git a/vendor/github.com/slack-go/slack/dialog.go b/vendor/github.com/slack-go/slack/dialog.go
new file mode 100644
index 0000000..f94113f
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/dialog.go
@@ -0,0 +1,120 @@
+package slack
+
+import (
+ "context"
+ "encoding/json"
+ "strings"
+)
+
+// InputType is the type of a dialog input element
+type InputType string
+
+const (
+ // InputTypeText textfield input
+ InputTypeText InputType = "text"
+ // InputTypeTextArea textarea input
+ InputTypeTextArea InputType = "textarea"
+ // InputTypeSelect select menus input
+ InputTypeSelect InputType = "select"
+)
+
+// DialogInput for dialogs input type text or menu
+type DialogInput struct {
+ Type InputType `json:"type"`
+ Label string `json:"label"`
+ Name string `json:"name"`
+ Placeholder string `json:"placeholder"`
+ Optional bool `json:"optional"`
+ Hint string `json:"hint"`
+}
+
+// DialogTrigger is the payload sent to dialog.open to display a dialog
+type DialogTrigger struct {
+ TriggerID string `json:"trigger_id"` //Required. Must respond within 3 seconds.
+ Dialog Dialog `json:"dialog"` //Required.
+}
+
+// Dialog as in Slack dialogs
+// https://api.slack.com/dialogs#option_element_attributes#top-level_dialog_attributes
+type Dialog struct {
+ TriggerID string `json:"trigger_id"` // Required
+ CallbackID string `json:"callback_id"` // Required
+ State string `json:"state,omitempty"` // Optional
+ Title string `json:"title"`
+ SubmitLabel string `json:"submit_label,omitempty"`
+ NotifyOnCancel bool `json:"notify_on_cancel"`
+ Elements []DialogElement `json:"elements"`
+}
+
+// DialogElement abstract type for dialogs.
+type DialogElement interface{}
+
+// DialogCallback DEPRECATED use InteractionCallback
+type DialogCallback InteractionCallback
+
+// DialogSubmissionCallback is sent from Slack when a user submits a form from within a dialog
+type DialogSubmissionCallback struct {
+ // NOTE: State is only used with the dialog_submission type.
+ // You should use InteractionCallback.BlockActionsState for block_actions type.
+ State string `json:"-"`
+ Submission map[string]string `json:"submission"`
+}
+
+// DialogOpenResponse response from `dialog.open`
+type DialogOpenResponse struct {
+ SlackResponse
+ DialogResponseMetadata DialogResponseMetadata `json:"response_metadata"`
+}
+
+// DialogResponseMetadata lists the error messages
+type DialogResponseMetadata struct {
+ Messages []string `json:"messages"`
+}
+
+// DialogInputValidationError is an error returned when a user enters an invalid value in a dialog form field
+type DialogInputValidationError struct {
+ Name string `json:"name"`
+ Error string `json:"error"`
+}
+
+// DialogInputValidationErrors lists field names and their error messages
+type DialogInputValidationErrors struct {
+ Errors []DialogInputValidationError `json:"errors"`
+}
+
+// OpenDialog opens a dialog window where the triggerID originated from.
+// EXPERIMENTAL: dialog functionality is currently experimental, api is not considered stable.
+func (api *Client) OpenDialog(triggerID string, dialog Dialog) (err error) {
+ return api.OpenDialogContext(context.Background(), triggerID, dialog)
+}
+
+// OpenDialogContext opens a dialog window where the triggerId originated from with a custom context
+// EXPERIMENTAL: dialog functionality is currently experimental, api is not considered stable.
+func (api *Client) OpenDialogContext(ctx context.Context, triggerID string, dialog Dialog) (err error) {
+ if triggerID == "" {
+ return ErrParametersMissing
+ }
+
+ req := DialogTrigger{
+ TriggerID: triggerID,
+ Dialog: dialog,
+ }
+
+ encoded, err := json.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ response := &DialogOpenResponse{}
+ endpoint := api.endpoint + "dialog.open"
+ if err := postJSON(ctx, api.httpclient, endpoint, api.token, encoded, response, api); err != nil {
+ return err
+ }
+
+ if len(response.DialogResponseMetadata.Messages) > 0 {
+ response.Ok = false
+ response.Error += "\n" + strings.Join(response.DialogResponseMetadata.Messages, "\n")
+ }
+
+ return response.Err()
+}
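+
+// A minimal sketch of opening a dialog (illustrative only; triggerID must come
+// from a real interaction payload and the labels are placeholders):
+//
+//    dialog := slack.Dialog{
+//        CallbackID:  "feedback-dialog",
+//        Title:       "Feedback",
+//        SubmitLabel: "Send",
+//        Elements: []slack.DialogElement{
+//            slack.NewTextInput("comment", "Comment", ""),
+//        },
+//    }
+//    err := api.OpenDialog(triggerID, dialog)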
diff --git a/vendor/github.com/slack-go/slack/dialog_select.go b/vendor/github.com/slack-go/slack/dialog_select.go
new file mode 100644
index 0000000..3d6be98
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/dialog_select.go
@@ -0,0 +1,115 @@
+package slack
+
+// SelectDataSource types of select datasource
+type SelectDataSource string
+
+const (
+ // DialogDataSourceStatic menu with static Options/OptionGroups
+ DialogDataSourceStatic SelectDataSource = "static"
+ // DialogDataSourceExternal dynamic datasource
+ DialogDataSourceExternal SelectDataSource = "external"
+ // DialogDataSourceConversations provides a list of conversations
+ DialogDataSourceConversations SelectDataSource = "conversations"
+ // DialogDataSourceChannels provides a list of channels
+ DialogDataSourceChannels SelectDataSource = "channels"
+ // DialogDataSourceUsers provides a list of users
+ DialogDataSourceUsers SelectDataSource = "users"
+)
+
+// DialogInputSelect dialog support for select boxes.
+type DialogInputSelect struct {
+ DialogInput
+ Value string `json:"value,omitempty"` //Optional.
+ DataSource SelectDataSource `json:"data_source,omitempty"` //Optional. Allowed values: "users", "channels", "conversations", "external".
+ SelectedOptions []DialogSelectOption `json:"selected_options,omitempty"` //Optional. May hold at most one element, for use with "external" only.
+ Options []DialogSelectOption `json:"options,omitempty"` //One of options or option_groups is required.
+ OptionGroups []DialogOptionGroup `json:"option_groups,omitempty"` //Provide up to 100 options.
+ MinQueryLength int `json:"min_query_length,omitempty"` //Optional. minimum characters before query is sent.
+ Hint string `json:"hint,omitempty"` //Optional. Additional hint text.
+}
+
+// DialogSelectOption is an option for the user to select from the menu
+type DialogSelectOption struct {
+ Label string `json:"label"`
+ Value string `json:"value"`
+}
+
+// DialogOptionGroup is a collection of options for creating a segmented table
+type DialogOptionGroup struct {
+ Label string `json:"label"`
+ Options []DialogSelectOption `json:"options"`
+}
+
+// NewStaticSelectDialogInput constructor for a `static` datasource menu input
+func NewStaticSelectDialogInput(name, label string, options []DialogSelectOption) *DialogInputSelect {
+ return &DialogInputSelect{
+ DialogInput: DialogInput{
+ Type: InputTypeSelect,
+ Name: name,
+ Label: label,
+ Optional: true,
+ },
+ DataSource: DialogDataSourceStatic,
+ Options: options,
+ }
+}
+
+// NewExternalSelectDialogInput constructor for an `external` datasource menu input
+func NewExternalSelectDialogInput(name, label string, options []DialogSelectOption) *DialogInputSelect {
+ return &DialogInputSelect{
+ DialogInput: DialogInput{
+ Type: InputTypeSelect,
+ Name: name,
+ Label: label,
+ Optional: true,
+ },
+ DataSource: DialogDataSourceExternal,
+ Options: options,
+ }
+}
+
+// NewGroupedSelectDialogInput creates grouped options select input for Dialogs.
+func NewGroupedSelectDialogInput(name, label string, options []DialogOptionGroup) *DialogInputSelect {
+ return &DialogInputSelect{
+ DialogInput: DialogInput{
+ Type: InputTypeSelect,
+ Name: name,
+ Label: label,
+ },
+ DataSource: DialogDataSourceStatic,
+ OptionGroups: options}
+}
+
+// NewDialogOptionGroup creates a DialogOptionGroup from several select options
+func NewDialogOptionGroup(label string, options ...DialogSelectOption) DialogOptionGroup {
+ return DialogOptionGroup{
+ Label: label,
+ Options: options,
+ }
+}
+
+// NewConversationsSelect returns a `Conversations` select
+func NewConversationsSelect(name, label string) *DialogInputSelect {
+ return newPresetSelect(name, label, DialogDataSourceConversations)
+}
+
+// NewChannelsSelect returns a `Channels` select
+func NewChannelsSelect(name, label string) *DialogInputSelect {
+ return newPresetSelect(name, label, DialogDataSourceChannels)
+}
+
+// NewUsersSelect returns a `Users` select
+func NewUsersSelect(name, label string) *DialogInputSelect {
+ return newPresetSelect(name, label, DialogDataSourceUsers)
+}
+
+func newPresetSelect(name, label string, dataSourceType SelectDataSource) *DialogInputSelect {
+ return &DialogInputSelect{
+ DialogInput: DialogInput{
+ Type: InputTypeSelect,
+ Label: label,
+ Name: name,
+ },
+ DataSource: dataSourceType,
+ }
+}
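+
+// A minimal sketch of building a grouped select element (illustrative only;
+// labels and values are placeholders):
+//
+//    groups := []slack.DialogOptionGroup{
+//        slack.NewDialogOptionGroup("Fruit",
+//            slack.DialogSelectOption{Label: "Apple", Value: "apple"},
+//            slack.DialogSelectOption{Label: "Pear", Value: "pear"},
+//        ),
+//    }
+//    sel := slack.NewGroupedSelectDialogInput("fruit", "Pick a fruit", groups)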
diff --git a/vendor/github.com/slack-go/slack/dialog_text.go b/vendor/github.com/slack-go/slack/dialog_text.go
new file mode 100644
index 0000000..25fa1b6
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/dialog_text.go
@@ -0,0 +1,59 @@
+package slack
+
+// TextInputSubtype is the input subtype. Accepted values are email, number, tel, and url; in some form factors, optimized input is provided for this subtype.
+type TextInputSubtype string
+
+// TextInputOption is a functional option for configuring additional text input settings.
+type TextInputOption func(*TextInputElement)
+
+const (
+ // InputSubtypeEmail email keyboard
+ InputSubtypeEmail TextInputSubtype = "email"
+ // InputSubtypeNumber numeric keyboard
+ InputSubtypeNumber TextInputSubtype = "number"
+ // InputSubtypeTel Phone keyboard
+ InputSubtypeTel TextInputSubtype = "tel"
+ // InputSubtypeURL URL keyboard
+ InputSubtypeURL TextInputSubtype = "url"
+)
+
+// TextInputElement subtype of DialogInput
+// https://api.slack.com/dialogs#option_element_attributes#text_element_attributes
+type TextInputElement struct {
+ DialogInput
+ MaxLength int `json:"max_length,omitempty"`
+ MinLength int `json:"min_length,omitempty"`
+ Hint string `json:"hint,omitempty"`
+ Subtype TextInputSubtype `json:"subtype"`
+ Value string `json:"value"`
+}
+
+// NewTextInput constructor for a `text` input
+func NewTextInput(name, label, text string, options ...TextInputOption) *TextInputElement {
+ t := &TextInputElement{
+ DialogInput: DialogInput{
+ Type: InputTypeText,
+ Name: name,
+ Label: label,
+ },
+ Value: text,
+ }
+
+ for _, opt := range options {
+ opt(t)
+ }
+
+ return t
+}
+
+// NewTextAreaInput constructor for a `textarea` input
+func NewTextAreaInput(name, label, text string) *TextInputElement {
+ return &TextInputElement{
+ DialogInput: DialogInput{
+ Type: InputTypeTextArea,
+ Name: name,
+ Label: label,
+ },
+ Value: text,
+ }
+}
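+
+// Because TextInputOption is a plain function type, callers can define their
+// own options inline. A minimal sketch (illustrative only; maxLen is not part
+// of this package):
+//
+//    maxLen := func(n int) slack.TextInputOption {
+//        return func(t *slack.TextInputElement) { t.MaxLength = n }
+//    }
+//    input := slack.NewTextInput("comment", "Comment", "", maxLen(500))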
diff --git a/vendor/github.com/slack-go/slack/dnd.go b/vendor/github.com/slack-go/slack/dnd.go
new file mode 100644
index 0000000..a3aa680
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/dnd.go
@@ -0,0 +1,151 @@
+package slack
+
+import (
+ "context"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+type SnoozeDebug struct {
+ SnoozeEndDate string `json:"snooze_end_date"`
+}
+
+type SnoozeInfo struct {
+ SnoozeEnabled bool `json:"snooze_enabled,omitempty"`
+ SnoozeEndTime int `json:"snooze_endtime,omitempty"`
+ SnoozeRemaining int `json:"snooze_remaining,omitempty"`
+ SnoozeDebug SnoozeDebug `json:"snooze_debug,omitempty"`
+}
+
+type DNDStatus struct {
+ Enabled bool `json:"dnd_enabled"`
+ NextStartTimestamp int `json:"next_dnd_start_ts"`
+ NextEndTimestamp int `json:"next_dnd_end_ts"`
+ SnoozeInfo
+}
+
+type dndResponseFull struct {
+ DNDStatus
+ SlackResponse
+}
+
+type dndTeamInfoResponse struct {
+ Users map[string]DNDStatus `json:"users"`
+ SlackResponse
+}
+
+func (api *Client) dndRequest(ctx context.Context, path string, values url.Values) (*dndResponseFull, error) {
+ response := &dndResponseFull{}
+ err := api.postMethod(ctx, path, values, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, response.Err()
+}
+
+// EndDND ends the user's scheduled Do Not Disturb session
+func (api *Client) EndDND() error {
+ return api.EndDNDContext(context.Background())
+}
+
+// EndDNDContext ends the user's scheduled Do Not Disturb session with a custom context
+func (api *Client) EndDNDContext(ctx context.Context) error {
+ values := url.Values{
+ "token": {api.token},
+ }
+
+ response := &SlackResponse{}
+
+ if err := api.postMethod(ctx, "dnd.endDnd", values, response); err != nil {
+ return err
+ }
+
+ return response.Err()
+}
+
+// EndSnooze ends the current user's snooze mode
+func (api *Client) EndSnooze() (*DNDStatus, error) {
+ return api.EndSnoozeContext(context.Background())
+}
+
+// EndSnoozeContext ends the current user's snooze mode with a custom context
+func (api *Client) EndSnoozeContext(ctx context.Context) (*DNDStatus, error) {
+ values := url.Values{
+ "token": {api.token},
+ }
+
+ response, err := api.dndRequest(ctx, "dnd.endSnooze", values)
+ if err != nil {
+ return nil, err
+ }
+ return &response.DNDStatus, nil
+}
+
+// GetDNDInfo provides information about a user's current Do Not Disturb settings.
+func (api *Client) GetDNDInfo(user *string) (*DNDStatus, error) {
+ return api.GetDNDInfoContext(context.Background(), user)
+}
+
+// GetDNDInfoContext provides information about a user's current Do Not Disturb settings with a custom context.
+func (api *Client) GetDNDInfoContext(ctx context.Context, user *string) (*DNDStatus, error) {
+ values := url.Values{
+ "token": {api.token},
+ }
+ if user != nil {
+ values.Set("user", *user)
+ }
+
+ response, err := api.dndRequest(ctx, "dnd.info", values)
+ if err != nil {
+ return nil, err
+ }
+ return &response.DNDStatus, nil
+}
+
+// GetDNDTeamInfo provides information about the given users' current Do Not Disturb settings.
+func (api *Client) GetDNDTeamInfo(users []string) (map[string]DNDStatus, error) {
+ return api.GetDNDTeamInfoContext(context.Background(), users)
+}
+
+// GetDNDTeamInfoContext provides information about the given users' current Do Not Disturb settings with a custom context.
+func (api *Client) GetDNDTeamInfoContext(ctx context.Context, users []string) (map[string]DNDStatus, error) {
+ values := url.Values{
+ "token": {api.token},
+ "users": {strings.Join(users, ",")},
+ }
+ response := &dndTeamInfoResponse{}
+
+ if err := api.postMethod(ctx, "dnd.teamInfo", values, response); err != nil {
+ return nil, err
+ }
+
+ if response.Err() != nil {
+ return nil, response.Err()
+ }
+
+ return response.Users, nil
+}
+
+// SetSnooze adjusts the snooze duration for a user's Do Not Disturb
+// settings. If a snooze session is not already active for the user, invoking
+// this method will begin one for the specified duration.
+func (api *Client) SetSnooze(minutes int) (*DNDStatus, error) {
+ return api.SetSnoozeContext(context.Background(), minutes)
+}
+
+// SetSnoozeContext adjusts the snooze duration for a user's Do Not Disturb settings with a custom context.
+// For more information see the SetSnooze docs
+func (api *Client) SetSnoozeContext(ctx context.Context, minutes int) (*DNDStatus, error) {
+ values := url.Values{
+ "token": {api.token},
+ "num_minutes": {strconv.Itoa(minutes)},
+ }
+
+ response, err := api.dndRequest(ctx, "dnd.setSnooze", values)
+ if err != nil {
+ return nil, err
+ }
+ return &response.DNDStatus, nil
+}
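+
+// A minimal usage sketch (illustrative only):
+//
+//    status, err := api.SetSnooze(30) // snooze notifications for 30 minutes
+//    if err == nil && status.SnoozeEnabled {
+//        // status.SnoozeRemaining reports how much snooze time is left
+//    }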
diff --git a/vendor/github.com/slack-go/slack/emoji.go b/vendor/github.com/slack-go/slack/emoji.go
new file mode 100644
index 0000000..b2b0c6c
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/emoji.go
@@ -0,0 +1,35 @@
+package slack
+
+import (
+ "context"
+ "net/url"
+)
+
+type emojiResponseFull struct {
+ Emoji map[string]string `json:"emoji"`
+ SlackResponse
+}
+
+// GetEmoji retrieves all the emojis
+func (api *Client) GetEmoji() (map[string]string, error) {
+ return api.GetEmojiContext(context.Background())
+}
+
+// GetEmojiContext retrieves all the emojis with a custom context
+func (api *Client) GetEmojiContext(ctx context.Context) (map[string]string, error) {
+ values := url.Values{
+ "token": {api.token},
+ }
+ response := &emojiResponseFull{}
+
+ err := api.postMethod(ctx, "emoji.list", values, response)
+ if err != nil {
+ return nil, err
+ }
+
+ if response.Err() != nil {
+ return nil, response.Err()
+ }
+
+ return response.Emoji, nil
+}
diff --git a/vendor/github.com/slack-go/slack/errors.go b/vendor/github.com/slack-go/slack/errors.go
new file mode 100644
index 0000000..8be22a6
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/errors.go
@@ -0,0 +1,21 @@
+package slack
+
+import "github.com/slack-go/slack/internal/errorsx"
+
+// Errors returned by various methods.
+const (
+ ErrAlreadyDisconnected = errorsx.String("Invalid call to Disconnect - Slack API is already disconnected")
+ ErrRTMDisconnected = errorsx.String("disconnect received while trying to connect")
+ ErrRTMGoodbye = errorsx.String("goodbye detected")
+ ErrRTMDeadman = errorsx.String("deadman switch triggered")
+ ErrParametersMissing = errorsx.String("received empty parameters")
+ ErrBlockIDNotUnique = errorsx.String("Block ID needs to be unique")
+ ErrInvalidConfiguration = errorsx.String("invalid configuration")
+ ErrMissingHeaders = errorsx.String("missing headers")
+ ErrExpiredTimestamp = errorsx.String("timestamp is too old")
+)
+
+// internal errors
+const (
+ errPaginationComplete = errorsx.String("pagination complete")
+)
diff --git a/vendor/github.com/slack-go/slack/files.go b/vendor/github.com/slack-go/slack/files.go
new file mode 100644
index 0000000..3562844
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/files.go
@@ -0,0 +1,597 @@
+package slack
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+const (
+ // Defaults for the files API parameters
+ DEFAULT_FILES_USER = ""
+ DEFAULT_FILES_CHANNEL = ""
+ DEFAULT_FILES_TS_FROM = 0
+ DEFAULT_FILES_TS_TO = -1
+ DEFAULT_FILES_TYPES = "all"
+ DEFAULT_FILES_COUNT = 100
+ DEFAULT_FILES_PAGE = 1
+ DEFAULT_FILES_SHOW_HIDDEN = false
+)
+
+// File contains all the information for a file
+type File struct {
+ ID string `json:"id"`
+ Created JSONTime `json:"created"`
+ Timestamp JSONTime `json:"timestamp"`
+
+ Name string `json:"name"`
+ Title string `json:"title"`
+ Mimetype string `json:"mimetype"`
+ ImageExifRotation int `json:"image_exif_rotation"`
+ Filetype string `json:"filetype"`
+ PrettyType string `json:"pretty_type"`
+ User string `json:"user"`
+
+ Mode string `json:"mode"`
+ Editable bool `json:"editable"`
+ IsExternal bool `json:"is_external"`
+ ExternalType string `json:"external_type"`
+
+ Size int `json:"size"`
+
+ URL string `json:"url"` // Deprecated - never set
+ URLDownload string `json:"url_download"` // Deprecated - never set
+ URLPrivate string `json:"url_private"`
+ URLPrivateDownload string `json:"url_private_download"`
+
+ OriginalH int `json:"original_h"`
+ OriginalW int `json:"original_w"`
+ Thumb64 string `json:"thumb_64"`
+ Thumb80 string `json:"thumb_80"`
+ Thumb160 string `json:"thumb_160"`
+ Thumb360 string `json:"thumb_360"`
+ Thumb360Gif string `json:"thumb_360_gif"`
+ Thumb360W int `json:"thumb_360_w"`
+ Thumb360H int `json:"thumb_360_h"`
+ Thumb480 string `json:"thumb_480"`
+ Thumb480W int `json:"thumb_480_w"`
+ Thumb480H int `json:"thumb_480_h"`
+ Thumb720 string `json:"thumb_720"`
+ Thumb720W int `json:"thumb_720_w"`
+ Thumb720H int `json:"thumb_720_h"`
+ Thumb960 string `json:"thumb_960"`
+ Thumb960W int `json:"thumb_960_w"`
+ Thumb960H int `json:"thumb_960_h"`
+ Thumb1024 string `json:"thumb_1024"`
+ Thumb1024W int `json:"thumb_1024_w"`
+ Thumb1024H int `json:"thumb_1024_h"`
+
+ Permalink string `json:"permalink"`
+ PermalinkPublic string `json:"permalink_public"`
+
+ EditLink string `json:"edit_link"`
+ Preview string `json:"preview"`
+ PreviewHighlight string `json:"preview_highlight"`
+ Lines int `json:"lines"`
+ LinesMore int `json:"lines_more"`
+
+ IsPublic bool `json:"is_public"`
+ PublicURLShared bool `json:"public_url_shared"`
+ Channels []string `json:"channels"`
+ Groups []string `json:"groups"`
+ IMs []string `json:"ims"`
+ InitialComment Comment `json:"initial_comment"`
+ CommentsCount int `json:"comments_count"`
+ NumStars int `json:"num_stars"`
+ IsStarred bool `json:"is_starred"`
+ Shares Share `json:"shares"`
+}
+
+type Share struct {
+ Public map[string][]ShareFileInfo `json:"public"`
+ Private map[string][]ShareFileInfo `json:"private"`
+}
+
+type ShareFileInfo struct {
+ ReplyUsers []string `json:"reply_users"`
+ ReplyUsersCount int `json:"reply_users_count"`
+ ReplyCount int `json:"reply_count"`
+ Ts string `json:"ts"`
+ ThreadTs string `json:"thread_ts"`
+ LatestReply string `json:"latest_reply"`
+ ChannelName string `json:"channel_name"`
+ TeamID string `json:"team_id"`
+}
+
+// FileUploadParameters contains all the parameters necessary (including the optional ones) for an UploadFile() request.
+//
+// There are three ways to upload a file. You can either set Content if file is small, set Reader if file is large,
+// or provide a local file path in File to upload it from your filesystem.
+//
+// Note that when using the Reader option, you *must* specify the Filename, otherwise the Slack API isn't happy.
+type FileUploadParameters struct {
+ File string
+ Content string
+ Reader io.Reader
+ Filetype string
+ Filename string
+ Title string
+ InitialComment string
+ Channels []string
+ ThreadTimestamp string
+}
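+
+// A minimal sketch of the Reader variant (illustrative only; data is a []byte
+// the caller already holds and the channel ID is a placeholder):
+//
+//    params := slack.FileUploadParameters{
+//        Reader:   bytes.NewReader(data),
+//        Filename: "report.txt", // mandatory when Reader is set
+//        Channels: []string{"C0123456789"},
+//    }
+//    file, err := api.UploadFile(params)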
+
+// GetFilesParameters contains all the parameters necessary (including the optional ones) for a GetFiles() request
+type GetFilesParameters struct {
+ User string
+ Channel string
+ TimestampFrom JSONTime
+ TimestampTo JSONTime
+ Types string
+ Count int
+ Page int
+ ShowHidden bool
+}
+
+// ListFilesParameters contains all the parameters necessary (including the optional ones) for a ListFiles() request
+type ListFilesParameters struct {
+ Limit int
+ User string
+ Channel string
+ Types string
+ Cursor string
+}
+
+type UploadFileV2Parameters struct {
+ File string
+ FileSize int
+ Content string
+ Reader io.Reader
+ Filename string
+ Title string
+ InitialComment string
+ Channel string
+ ThreadTimestamp string
+ AltTxt string
+ SnippetText string
+}
+
+type getUploadURLExternalParameters struct {
+ altText string
+ fileSize int
+ fileName string
+ snippetText string
+}
+
+type getUploadURLExternalResponse struct {
+ UploadURL string `json:"upload_url"`
+ FileID string `json:"file_id"`
+ SlackResponse
+}
+
+type uploadToURLParameters struct {
+ UploadURL string
+ Reader io.Reader
+ File string
+ Content string
+ Filename string
+}
+
+type FileSummary struct {
+ ID string `json:"id"`
+ Title string `json:"title"`
+}
+
+type completeUploadExternalParameters struct {
+ title string
+ channel string
+ initialComment string
+ threadTimestamp string
+}
+
+type completeUploadExternalResponse struct {
+ SlackResponse
+ Files []FileSummary `json:"files"`
+}
+
+type fileResponseFull struct {
+ File `json:"file"`
+ Paging `json:"paging"`
+ Comments []Comment `json:"comments"`
+ Files []File `json:"files"`
+ Metadata ResponseMetadata `json:"response_metadata"`
+
+ SlackResponse
+}
+
+// NewGetFilesParameters provides an instance of GetFilesParameters with all the sane default values set
+func NewGetFilesParameters() GetFilesParameters {
+ return GetFilesParameters{
+ User: DEFAULT_FILES_USER,
+ Channel: DEFAULT_FILES_CHANNEL,
+ TimestampFrom: DEFAULT_FILES_TS_FROM,
+ TimestampTo: DEFAULT_FILES_TS_TO,
+ Types: DEFAULT_FILES_TYPES,
+ Count: DEFAULT_FILES_COUNT,
+ Page: DEFAULT_FILES_PAGE,
+ ShowHidden: DEFAULT_FILES_SHOW_HIDDEN,
+ }
+}
+
+func (api *Client) fileRequest(ctx context.Context, path string, values url.Values) (*fileResponseFull, error) {
+ response := &fileResponseFull{}
+ err := api.postMethod(ctx, path, values, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, response.Err()
+}
+
+// GetFileInfo retrieves a file and related comments
+func (api *Client) GetFileInfo(fileID string, count, page int) (*File, []Comment, *Paging, error) {
+ return api.GetFileInfoContext(context.Background(), fileID, count, page)
+}
+
+// GetFileInfoContext retrieves a file and related comments with a custom context
+func (api *Client) GetFileInfoContext(ctx context.Context, fileID string, count, page int) (*File, []Comment, *Paging, error) {
+ values := url.Values{
+ "token": {api.token},
+ "file": {fileID},
+ "count": {strconv.Itoa(count)},
+ "page": {strconv.Itoa(page)},
+ }
+
+ response, err := api.fileRequest(ctx, "files.info", values)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ return &response.File, response.Comments, &response.Paging, nil
+}
+
+// GetFile retrieves a given file from its private download URL
+func (api *Client) GetFile(downloadURL string, writer io.Writer) error {
+ return api.GetFileContext(context.Background(), downloadURL, writer)
+}
+
+// GetFileContext retrieves a given file from its private download URL with a custom context
+//
+// For more details, see GetFile documentation.
+func (api *Client) GetFileContext(ctx context.Context, downloadURL string, writer io.Writer) error {
+ return downloadFile(ctx, api.httpclient, api.token, downloadURL, writer, api)
+}
+
+// GetFiles retrieves all files according to the parameters given
+func (api *Client) GetFiles(params GetFilesParameters) ([]File, *Paging, error) {
+ return api.GetFilesContext(context.Background(), params)
+}
+
+// GetFilesContext retrieves all files according to the parameters given with a custom context
+func (api *Client) GetFilesContext(ctx context.Context, params GetFilesParameters) ([]File, *Paging, error) {
+ values := url.Values{
+ "token": {api.token},
+ }
+ if params.User != DEFAULT_FILES_USER {
+ values.Add("user", params.User)
+ }
+ if params.Channel != DEFAULT_FILES_CHANNEL {
+ values.Add("channel", params.Channel)
+ }
+ if params.TimestampFrom != DEFAULT_FILES_TS_FROM {
+ values.Add("ts_from", strconv.FormatInt(int64(params.TimestampFrom), 10))
+ }
+ if params.TimestampTo != DEFAULT_FILES_TS_TO {
+ values.Add("ts_to", strconv.FormatInt(int64(params.TimestampTo), 10))
+ }
+ if params.Types != DEFAULT_FILES_TYPES {
+ values.Add("types", params.Types)
+ }
+ if params.Count != DEFAULT_FILES_COUNT {
+ values.Add("count", strconv.Itoa(params.Count))
+ }
+ if params.Page != DEFAULT_FILES_PAGE {
+ values.Add("page", strconv.Itoa(params.Page))
+ }
+ if params.ShowHidden != DEFAULT_FILES_SHOW_HIDDEN {
+ values.Add("show_files_hidden_by_limit", strconv.FormatBool(params.ShowHidden))
+ }
+
+ response, err := api.fileRequest(ctx, "files.list", values)
+ if err != nil {
+ return nil, nil, err
+ }
+ return response.Files, &response.Paging, nil
+}
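+
+// A minimal sketch combining the defaults with overrides (illustrative only;
+// the channel ID is a placeholder):
+//
+//    params := slack.NewGetFilesParameters()
+//    params.Channel = "C0123456789"
+//    params.Count = 50
+//    files, paging, err := api.GetFiles(params)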
+
+// ListFiles retrieves all files according to the parameters given. Uses cursor based pagination.
+func (api *Client) ListFiles(params ListFilesParameters) ([]File, *ListFilesParameters, error) {
+ return api.ListFilesContext(context.Background(), params)
+}
+
+// ListFilesContext retrieves all files according to the parameters given with a custom context.
+//
+// For more details, see ListFiles documentation.
+func (api *Client) ListFilesContext(ctx context.Context, params ListFilesParameters) ([]File, *ListFilesParameters, error) {
+ values := url.Values{
+ "token": {api.token},
+ }
+
+ if params.User != DEFAULT_FILES_USER {
+ values.Add("user", params.User)
+ }
+ if params.Channel != DEFAULT_FILES_CHANNEL {
+ values.Add("channel", params.Channel)
+ }
+ if params.Limit != DEFAULT_FILES_COUNT {
+ values.Add("limit", strconv.Itoa(params.Limit))
+ }
+ if params.Cursor != "" {
+ values.Add("cursor", params.Cursor)
+ }
+
+ response, err := api.fileRequest(ctx, "files.list", values)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ params.Cursor = response.Metadata.Cursor
+
+ return response.Files, &params, nil
+}
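+
+// A minimal cursor-pagination sketch (illustrative only):
+//
+//    params := slack.ListFilesParameters{Limit: 100}
+//    for {
+//        files, next, err := api.ListFiles(params)
+//        if err != nil {
+//            break
+//        }
+//        // process files here
+//        if next.Cursor == "" {
+//            break
+//        }
+//        params = *next
+//    }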
+
+// UploadFile uploads a file
+func (api *Client) UploadFile(params FileUploadParameters) (file *File, err error) {
+ return api.UploadFileContext(context.Background(), params)
+}
+
+// UploadFileContext uploads a file with a custom context
+func (api *Client) UploadFileContext(ctx context.Context, params FileUploadParameters) (file *File, err error) {
+ // Test if user token is valid. This helps because client.Do doesn't like this for some reason. XXX: More
+ // investigation needed, but for now this will do.
+ _, err = api.AuthTestContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ response := &fileResponseFull{}
+ values := url.Values{}
+ if params.Filetype != "" {
+ values.Add("filetype", params.Filetype)
+ }
+ if params.Filename != "" {
+ values.Add("filename", params.Filename)
+ }
+ if params.Title != "" {
+ values.Add("title", params.Title)
+ }
+ if params.InitialComment != "" {
+ values.Add("initial_comment", params.InitialComment)
+ }
+ if params.ThreadTimestamp != "" {
+ values.Add("thread_ts", params.ThreadTimestamp)
+ }
+ if len(params.Channels) != 0 {
+ values.Add("channels", strings.Join(params.Channels, ","))
+ }
+ if params.Content != "" {
+ values.Add("content", params.Content)
+ values.Add("token", api.token)
+ err = api.postMethod(ctx, "files.upload", values, response)
+ } else if params.File != "" {
+ err = postLocalWithMultipartResponse(ctx, api.httpclient, api.endpoint+"files.upload", params.File, "file", api.token, values, response, api)
+ } else if params.Reader != nil {
+ if params.Filename == "" {
+ return nil, fmt.Errorf("files.upload: FileUploadParameters.Filename is mandatory when using FileUploadParameters.Reader")
+ }
+ err = postWithMultipartResponse(ctx, api.httpclient, api.endpoint+"files.upload", params.Filename, "file", api.token, values, params.Reader, response, api)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &response.File, response.Err()
+}
+
+// DeleteFileComment deletes a file's comment
+func (api *Client) DeleteFileComment(commentID, fileID string) error {
+ return api.DeleteFileCommentContext(context.Background(), fileID, commentID)
+}
+
+// DeleteFileCommentContext deletes a file's comment with a custom context
+func (api *Client) DeleteFileCommentContext(ctx context.Context, fileID, commentID string) (err error) {
+ if fileID == "" || commentID == "" {
+ return ErrParametersMissing
+ }
+
+ values := url.Values{
+ "token": {api.token},
+ "file": {fileID},
+ "id": {commentID},
+ }
+ _, err = api.fileRequest(ctx, "files.comments.delete", values)
+ return err
+}
+
+// DeleteFile deletes a file
+func (api *Client) DeleteFile(fileID string) error {
+ return api.DeleteFileContext(context.Background(), fileID)
+}
+
+// DeleteFileContext deletes a file with a custom context
+func (api *Client) DeleteFileContext(ctx context.Context, fileID string) (err error) {
+ values := url.Values{
+ "token": {api.token},
+ "file": {fileID},
+ }
+
+ _, err = api.fileRequest(ctx, "files.delete", values)
+ return err
+}
+
+// RevokeFilePublicURL disables public/external sharing for a file
+func (api *Client) RevokeFilePublicURL(fileID string) (*File, error) {
+ return api.RevokeFilePublicURLContext(context.Background(), fileID)
+}
+
+// RevokeFilePublicURLContext disables public/external sharing for a file with a custom context
+func (api *Client) RevokeFilePublicURLContext(ctx context.Context, fileID string) (*File, error) {
+ values := url.Values{
+ "token": {api.token},
+ "file": {fileID},
+ }
+
+ response, err := api.fileRequest(ctx, "files.revokePublicURL", values)
+ if err != nil {
+ return nil, err
+ }
+ return &response.File, nil
+}
+
+// ShareFilePublicURL enables public/external sharing for a file
+func (api *Client) ShareFilePublicURL(fileID string) (*File, []Comment, *Paging, error) {
+ return api.ShareFilePublicURLContext(context.Background(), fileID)
+}
+
+// ShareFilePublicURLContext enables public/external sharing for a file with a custom context
+func (api *Client) ShareFilePublicURLContext(ctx context.Context, fileID string) (*File, []Comment, *Paging, error) {
+ values := url.Values{
+ "token": {api.token},
+ "file": {fileID},
+ }
+
+ response, err := api.fileRequest(ctx, "files.sharedPublicURL", values)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ return &response.File, response.Comments, &response.Paging, nil
+}
+
+// getUploadURLExternal gets an upload URL and file ID from Slack which can later be used to upload a file
+func (api *Client) getUploadURLExternal(ctx context.Context, params getUploadURLExternalParameters) (*getUploadURLExternalResponse, error) {
+ values := url.Values{
+ "token": {api.token},
+ "filename": {params.fileName},
+ "length": {strconv.Itoa(params.fileSize)},
+ }
+ if params.altText != "" {
+ values.Add("initial_comment", params.altText)
+ }
+ if params.snippetText != "" {
+ values.Add("thread_ts", params.snippetText)
+ }
+ response := &getUploadURLExternalResponse{}
+ err := api.postMethod(ctx, "files.getUploadURLExternal", values, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, response.Err()
+}
+
+// uploadToURL uploads the file to the provided URL using a POST request
+func (api *Client) uploadToURL(ctx context.Context, params uploadToURLParameters) (err error) {
+ values := url.Values{}
+ if params.Content != "" {
+ values.Add("content", params.Content)
+ values.Add("token", api.token)
+ err = postForm(ctx, api.httpclient, params.UploadURL, values, nil, api)
+ } else if params.File != "" {
+ err = postLocalWithMultipartResponse(ctx, api.httpclient, params.UploadURL, params.File, "file", api.token, values, nil, api)
+ } else if params.Reader != nil {
+ err = postWithMultipartResponse(ctx, api.httpclient, params.UploadURL, params.Filename, "file", api.token, values, params.Reader, nil, api)
+ }
+ return err
+}
+
+// completeUploadExternal completes the upload once the file content has been posted and shares the file to the specified channel
+func (api *Client) completeUploadExternal(ctx context.Context, fileID string, params completeUploadExternalParameters) (file *completeUploadExternalResponse, err error) {
+ request := []FileSummary{{ID: fileID, Title: params.title}}
+ requestBytes, err := json.Marshal(request)
+ if err != nil {
+ return nil, err
+ }
+ values := url.Values{
+ "token": {api.token},
+ "files": {string(requestBytes)},
+ "channel_id": {params.channel},
+ }
+
+ if params.initialComment != "" {
+ values.Add("initial_comment", params.initialComment)
+ }
+ if params.threadTimestamp != "" {
+ values.Add("thread_ts", params.threadTimestamp)
+ }
+ response := &completeUploadExternalResponse{}
+ err = api.postMethod(ctx, "files.completeUploadExternal", values, response)
+ if err != nil {
+ return nil, err
+ }
+ if response.Err() != nil {
+ return nil, response.Err()
+ }
+ return response, nil
+}
+
+// UploadFileV2 uploads a file to a given Slack channel in 3 steps:
+// 1. Get an upload URL using the files.getUploadURLExternal API
+// 2. Send the file with a POST to the URL provided by Slack
+// 3. Complete the upload and share it to the specified channel using files.completeUploadExternal
+func (api *Client) UploadFileV2(params UploadFileV2Parameters) (*FileSummary, error) {
+ return api.UploadFileV2Context(context.Background(), params)
+}
+
+// UploadFileV2Context uploads a file to a given Slack channel in 3 steps with a custom context:
+// 1. Get an upload URL using the files.getUploadURLExternal API
+// 2. Send the file with a POST to the URL provided by Slack
+// 3. Complete the upload and share it to the specified channel using files.completeUploadExternal
+func (api *Client) UploadFileV2Context(ctx context.Context, params UploadFileV2Parameters) (file *FileSummary, err error) {
+ if params.Filename == "" {
+ return nil, fmt.Errorf("file.upload.v2: filename cannot be empty")
+ }
+ if params.FileSize == 0 {
+ return nil, fmt.Errorf("file.upload.v2: file size cannot be 0")
+ }
+ if params.Channel == "" {
+ return nil, fmt.Errorf("file.upload.v2: channel cannot be empty")
+ }
+ u, err := api.getUploadURLExternal(ctx, getUploadURLExternalParameters{
+ altText: params.AltTxt,
+ fileName: params.Filename,
+ fileSize: params.FileSize,
+ snippetText: params.SnippetText,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ err = api.uploadToURL(ctx, uploadToURLParameters{
+ UploadURL: u.UploadURL,
+ Reader: params.Reader,
+ File: params.File,
+ Content: params.Content,
+ Filename: params.Filename,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := api.completeUploadExternal(ctx, u.FileID, completeUploadExternalParameters{
+ title: params.Title,
+ channel: params.Channel,
+ initialComment: params.InitialComment,
+ threadTimestamp: params.ThreadTimestamp,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if len(c.Files) != 1 {
+ return nil, fmt.Errorf("file.upload.v2: something went wrong; received %d files instead of 1", len(c.Files))
+ }
+
+ return &c.Files[0], nil
+}
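+
+// A minimal usage sketch (illustrative only; data is a []byte the caller
+// already holds and the channel ID is a placeholder):
+//
+//    summary, err := api.UploadFileV2(slack.UploadFileV2Parameters{
+//        Reader:   bytes.NewReader(data),
+//        Filename: "report.txt",
+//        FileSize: len(data),
+//        Channel:  "C0123456789",
+//    })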
diff --git a/vendor/github.com/slack-go/slack/groups.go b/vendor/github.com/slack-go/slack/groups.go
new file mode 100644
index 0000000..b77f909
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/groups.go
@@ -0,0 +1,7 @@
+package slack
+
+// Group contains all the information for a group
+type Group struct {
+ GroupConversation
+ IsGroup bool `json:"is_group"`
+}
diff --git a/vendor/github.com/slack-go/slack/history.go b/vendor/github.com/slack-go/slack/history.go
new file mode 100644
index 0000000..49dfe35
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/history.go
@@ -0,0 +1,37 @@
+package slack
+
+const (
+ DEFAULT_HISTORY_LATEST = ""
+ DEFAULT_HISTORY_OLDEST = "0"
+ DEFAULT_HISTORY_COUNT = 100
+ DEFAULT_HISTORY_INCLUSIVE = false
+ DEFAULT_HISTORY_UNREADS = false
+)
+
+// HistoryParameters contains all the necessary information to help in the retrieval of history for Channels/Groups/DMs
+type HistoryParameters struct {
+ Latest string
+ Oldest string
+ Count int
+ Inclusive bool
+ Unreads bool
+}
+
+// History contains message history information needed to navigate a Channel / Group / DM history
+type History struct {
+ Latest string `json:"latest"`
+ Messages []Message `json:"messages"`
+ HasMore bool `json:"has_more"`
+ Unread int `json:"unread_count_display"`
+}
+
+// NewHistoryParameters provides an instance of HistoryParameters with all the sane default values set
+func NewHistoryParameters() HistoryParameters {
+ return HistoryParameters{
+ Latest: DEFAULT_HISTORY_LATEST,
+ Oldest: DEFAULT_HISTORY_OLDEST,
+ Count: DEFAULT_HISTORY_COUNT,
+ Inclusive: DEFAULT_HISTORY_INCLUSIVE,
+ Unreads: DEFAULT_HISTORY_UNREADS,
+ }
+}
diff --git a/vendor/github.com/slack-go/slack/im.go b/vendor/github.com/slack-go/slack/im.go
new file mode 100644
index 0000000..7c4bc25
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/im.go
@@ -0,0 +1,21 @@
+package slack
+
+type imChannel struct {
+ ID string `json:"id"`
+}
+
+type imResponseFull struct {
+ NoOp bool `json:"no_op"`
+ AlreadyClosed bool `json:"already_closed"`
+ AlreadyOpen bool `json:"already_open"`
+ Channel imChannel `json:"channel"`
+ IMs []IM `json:"ims"`
+ History
+ SlackResponse
+}
+
+// IM contains information related to the Direct Message channel
+type IM struct {
+ Conversation
+ IsUserDeleted bool `json:"is_user_deleted"`
+}
diff --git a/vendor/github.com/slack-go/slack/info.go b/vendor/github.com/slack-go/slack/info.go
new file mode 100644
index 0000000..b06dffd
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/info.go
@@ -0,0 +1,476 @@
+package slack
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type UserPrefsCarrier struct {
+ SlackResponse
+ UserPrefs *UserPrefs `json:"prefs"`
+}
+
+// UserPrefs carries a bunch of user settings including some unknown types
+type UserPrefs struct {
+ UserColors string `json:"user_colors,omitempty"`
+ ColorNamesInList bool `json:"color_names_in_list,omitempty"`
+ // Keyboard UnknownType `json:"keyboard"`
+ EmailAlerts string `json:"email_alerts,omitempty"`
+ EmailAlertsSleepUntil int `json:"email_alerts_sleep_until,omitempty"`
+ EmailTips bool `json:"email_tips,omitempty"`
+ EmailWeekly bool `json:"email_weekly,omitempty"`
+ EmailOffers bool `json:"email_offers,omitempty"`
+ EmailResearch bool `json:"email_research,omitempty"`
+ EmailDeveloper bool `json:"email_developer,omitempty"`
+ WelcomeMessageHidden bool `json:"welcome_message_hidden,omitempty"`
+ SearchSort string `json:"search_sort,omitempty"`
+ SearchFileSort string `json:"search_file_sort,omitempty"`
+ SearchChannelSort string `json:"search_channel_sort,omitempty"`
+ SearchPeopleSort string `json:"search_people_sort,omitempty"`
+ ExpandInlineImages bool `json:"expand_inline_images,omitempty"`
+ ExpandInternalInlineImages bool `json:"expand_internal_inline_images,omitempty"`
+ ExpandSnippets bool `json:"expand_snippets,omitempty"`
+ PostsFormattingGuide bool `json:"posts_formatting_guide,omitempty"`
+ SeenWelcome2 bool `json:"seen_welcome_2,omitempty"`
+ SeenSSBPrompt bool `json:"seen_ssb_prompt,omitempty"`
+ SpacesNewXpBannerDismissed bool `json:"spaces_new_xp_banner_dismissed,omitempty"`
+ SearchOnlyMyChannels bool `json:"search_only_my_channels,omitempty"`
+ SearchOnlyCurrentTeam bool `json:"search_only_current_team,omitempty"`
+ SearchHideMyChannels bool `json:"search_hide_my_channels,omitempty"`
+ SearchOnlyShowOnline bool `json:"search_only_show_online,omitempty"`
+ SearchHideDeactivatedUsers bool `json:"search_hide_deactivated_users,omitempty"`
+ EmojiMode string `json:"emoji_mode,omitempty"`
+ EmojiUse string `json:"emoji_use,omitempty"`
+ HasInvited bool `json:"has_invited,omitempty"`
+ HasUploaded bool `json:"has_uploaded,omitempty"`
+ HasCreatedChannel bool `json:"has_created_channel,omitempty"`
+ HasSearched bool `json:"has_searched,omitempty"`
+ SearchExcludeChannels string `json:"search_exclude_channels,omitempty"`
+ MessagesTheme string `json:"messages_theme,omitempty"`
+ WebappSpellcheck bool `json:"webapp_spellcheck,omitempty"`
+ NoJoinedOverlays bool `json:"no_joined_overlays,omitempty"`
+ NoCreatedOverlays bool `json:"no_created_overlays,omitempty"`
+ DropboxEnabled bool `json:"dropbox_enabled,omitempty"`
+ SeenDomainInviteReminder bool `json:"seen_domain_invite_reminder,omitempty"`
+ SeenMemberInviteReminder bool `json:"seen_member_invite_reminder,omitempty"`
+ MuteSounds bool `json:"mute_sounds,omitempty"`
+ ArrowHistory bool `json:"arrow_history,omitempty"`
+ TabUIReturnSelects bool `json:"tab_ui_return_selects,omitempty"`
+ ObeyInlineImgLimit bool `json:"obey_inline_img_limit,omitempty"`
+ RequireAt bool `json:"require_at,omitempty"`
+ SsbSpaceWindow string `json:"ssb_space_window,omitempty"`
+ MacSsbBounce string `json:"mac_ssb_bounce,omitempty"`
+ MacSsbBullet bool `json:"mac_ssb_bullet,omitempty"`
+ ExpandNonMediaAttachments bool `json:"expand_non_media_attachments,omitempty"`
+ ShowTyping bool `json:"show_typing,omitempty"`
+ PagekeysHandled bool `json:"pagekeys_handled,omitempty"`
+ LastSnippetType string `json:"last_snippet_type,omitempty"`
+ DisplayRealNamesOverride int `json:"display_real_names_override,omitempty"`
+ DisplayDisplayNames bool `json:"display_display_names,omitempty"`
+ Time24 bool `json:"time24,omitempty"`
+ EnterIsSpecialInTbt bool `json:"enter_is_special_in_tbt,omitempty"`
+ MsgInputSendBtn bool `json:"msg_input_send_btn,omitempty"`
+ MsgInputSendBtnAutoSet bool `json:"msg_input_send_btn_auto_set,omitempty"`
+ MsgInputStickyComposer bool `json:"msg_input_sticky_composer,omitempty"`
+ GraphicEmoticons bool `json:"graphic_emoticons,omitempty"`
+ ConvertEmoticons bool `json:"convert_emoticons,omitempty"`
+ SsEmojis bool `json:"ss_emojis,omitempty"`
+ SeenOnboardingStart bool `json:"seen_onboarding_start,omitempty"`
+ OnboardingCancelled bool `json:"onboarding_cancelled,omitempty"`
+ SeenOnboardingSlackbotConversation bool `json:"seen_onboarding_slackbot_conversation,omitempty"`
+ SeenOnboardingChannels bool `json:"seen_onboarding_channels,omitempty"`
+ SeenOnboardingDirectMessages bool `json:"seen_onboarding_direct_messages,omitempty"`
+ SeenOnboardingInvites bool `json:"seen_onboarding_invites,omitempty"`
+ SeenOnboardingSearch bool `json:"seen_onboarding_search,omitempty"`
+ SeenOnboardingRecentMentions bool `json:"seen_onboarding_recent_mentions,omitempty"`
+ SeenOnboardingStarredItems bool `json:"seen_onboarding_starred_items,omitempty"`
+ SeenOnboardingPrivateGroups bool `json:"seen_onboarding_private_groups,omitempty"`
+ SeenOnboardingBanner bool `json:"seen_onboarding_banner,omitempty"`
+ OnboardingSlackbotConversationStep int `json:"onboarding_slackbot_conversation_step,omitempty"`
+ SetTzAutomatically bool `json:"set_tz_automatically,omitempty"`
+ SuppressLinkWarning bool `json:"suppress_link_warning,omitempty"`
+ DndEnabled bool `json:"dnd_enabled,omitempty"`
+ DndStartHour string `json:"dnd_start_hour,omitempty"`
+ DndEndHour string `json:"dnd_end_hour,omitempty"`
+ DndBeforeMonday string `json:"dnd_before_monday,omitempty"`
+ DndAfterMonday string `json:"dnd_after_monday,omitempty"`
+ DndEnabledMonday string `json:"dnd_enabled_monday,omitempty"`
+ DndBeforeTuesday string `json:"dnd_before_tuesday,omitempty"`
+ DndAfterTuesday string `json:"dnd_after_tuesday,omitempty"`
+ DndEnabledTuesday string `json:"dnd_enabled_tuesday,omitempty"`
+ DndBeforeWednesday string `json:"dnd_before_wednesday,omitempty"`
+ DndAfterWednesday string `json:"dnd_after_wednesday,omitempty"`
+ DndEnabledWednesday string `json:"dnd_enabled_wednesday,omitempty"`
+ DndBeforeThursday string `json:"dnd_before_thursday,omitempty"`
+ DndAfterThursday string `json:"dnd_after_thursday,omitempty"`
+ DndEnabledThursday string `json:"dnd_enabled_thursday,omitempty"`
+ DndBeforeFriday string `json:"dnd_before_friday,omitempty"`
+ DndAfterFriday string `json:"dnd_after_friday,omitempty"`
+ DndEnabledFriday string `json:"dnd_enabled_friday,omitempty"`
+ DndBeforeSaturday string `json:"dnd_before_saturday,omitempty"`
+ DndAfterSaturday string `json:"dnd_after_saturday,omitempty"`
+ DndEnabledSaturday string `json:"dnd_enabled_saturday,omitempty"`
+ DndBeforeSunday string `json:"dnd_before_sunday,omitempty"`
+ DndAfterSunday string `json:"dnd_after_sunday,omitempty"`
+ DndEnabledSunday string `json:"dnd_enabled_sunday,omitempty"`
+ DndDays string `json:"dnd_days,omitempty"`
+ DndCustomNewBadgeSeen bool `json:"dnd_custom_new_badge_seen,omitempty"`
+ DndNotificationScheduleNewBadgeSeen bool `json:"dnd_notification_schedule_new_badge_seen,omitempty"`
+ // UnreadCollapsedChannels unknownType `json:"unread_collapsed_channels,omitempty"`
+ SidebarBehavior string `json:"sidebar_behavior,omitempty"`
+ ChannelSort string `json:"channel_sort,omitempty"`
+ SeparatePrivateChannels bool `json:"separate_private_channels,omitempty"`
+ SeparateSharedChannels bool `json:"separate_shared_channels,omitempty"`
+ SidebarTheme string `json:"sidebar_theme,omitempty"`
+ SidebarThemeCustomValues string `json:"sidebar_theme_custom_values,omitempty"`
+ NoInvitesWidgetInSidebar bool `json:"no_invites_widget_in_sidebar,omitempty"`
+ NoOmniboxInChannels bool `json:"no_omnibox_in_channels,omitempty"`
+
+ KKeyOmniboxAutoHideCount int `json:"k_key_omnibox_auto_hide_count,omitempty"`
+ ShowSidebarQuickswitcherButton bool `json:"show_sidebar_quickswitcher_button,omitempty"`
+ EntOrgWideChannelsSidebar bool `json:"ent_org_wide_channels_sidebar,omitempty"`
+ MarkMsgsReadImmediately bool `json:"mark_msgs_read_immediately,omitempty"`
+ StartScrollAtOldest bool `json:"start_scroll_at_oldest,omitempty"`
+ SnippetEditorWrapLongLines bool `json:"snippet_editor_wrap_long_lines,omitempty"`
+ LsDisabled bool `json:"ls_disabled,omitempty"`
+ FKeySearch bool `json:"f_key_search,omitempty"`
+ KKeyOmnibox bool `json:"k_key_omnibox,omitempty"`
+ PromptedForEmailDisabling bool `json:"prompted_for_email_disabling,omitempty"`
+ NoMacelectronBanner bool `json:"no_macelectron_banner,omitempty"`
+ NoMacssb1Banner bool `json:"no_macssb1_banner,omitempty"`
+ NoMacssb2Banner bool `json:"no_macssb2_banner,omitempty"`
+ NoWinssb1Banner bool `json:"no_winssb1_banner,omitempty"`
+ HideUserGroupInfoPane bool `json:"hide_user_group_info_pane,omitempty"`
+ MentionsExcludeAtUserGroups bool `json:"mentions_exclude_at_user_groups,omitempty"`
+ MentionsExcludeReactions bool `json:"mentions_exclude_reactions,omitempty"`
+ PrivacyPolicySeen bool `json:"privacy_policy_seen,omitempty"`
+ EnterpriseMigrationSeen bool `json:"enterprise_migration_seen,omitempty"`
+ LastTosAcknowledged string `json:"last_tos_acknowledged,omitempty"`
+ SearchExcludeBots bool `json:"search_exclude_bots,omitempty"`
+ LoadLato2 bool `json:"load_lato_2,omitempty"`
+ FullerTimestamps bool `json:"fuller_timestamps,omitempty"`
+ LastSeenAtChannelWarning int `json:"last_seen_at_channel_warning,omitempty"`
+ EmojiAutocompleteBig bool `json:"emoji_autocomplete_big,omitempty"`
+ TwoFactorAuthEnabled bool `json:"two_factor_auth_enabled,omitempty"`
+ // TwoFactorType unknownType `json:"two_factor_type,omitempty"`
+ // TwoFactorBackupType unknownType `json:"two_factor_backup_type,omitempty"`
+ HideHexSwatch bool `json:"hide_hex_swatch,omitempty"`
+ ShowJumperScores bool `json:"show_jumper_scores,omitempty"`
+ EnterpriseMdmCustomMsg string `json:"enterprise_mdm_custom_msg,omitempty"`
+ // EnterpriseExcludedAppTeams unknownType `json:"enterprise_excluded_app_teams,omitempty"`
+ ClientLogsPri string `json:"client_logs_pri,omitempty"`
+ FlannelServerPool string `json:"flannel_server_pool,omitempty"`
+ MentionsExcludeAtChannels bool `json:"mentions_exclude_at_channels,omitempty"`
+ ConfirmClearAllUnreads bool `json:"confirm_clear_all_unreads,omitempty"`
+ ConfirmUserMarkedAway bool `json:"confirm_user_marked_away,omitempty"`
+ BoxEnabled bool `json:"box_enabled,omitempty"`
+ SeenSingleEmojiMsg bool `json:"seen_single_emoji_msg,omitempty"`
+ ConfirmShCallStart bool `json:"confirm_sh_call_start,omitempty"`
+ PreferredSkinTone string `json:"preferred_skin_tone,omitempty"`
+ ShowAllSkinTones bool `json:"show_all_skin_tones,omitempty"`
+ WhatsNewRead int `json:"whats_new_read,omitempty"`
+ // FrecencyJumper unknownType `json:"frecency_jumper,omitempty"`
+ FrecencyEntJumper string `json:"frecency_ent_jumper,omitempty"`
+ FrecencyEntJumperBackup string `json:"frecency_ent_jumper_backup,omitempty"`
+ Jumbomoji bool `json:"jumbomoji,omitempty"`
+ NewxpSeenLastMessage int `json:"newxp_seen_last_message,omitempty"`
+ ShowMemoryInstrument bool `json:"show_memory_instrument,omitempty"`
+ EnableUnreadView bool `json:"enable_unread_view,omitempty"`
+ SeenUnreadViewCoachmark bool `json:"seen_unread_view_coachmark,omitempty"`
+ EnableReactEmojiPicker bool `json:"enable_react_emoji_picker,omitempty"`
+ SeenCustomStatusBadge bool `json:"seen_custom_status_badge,omitempty"`
+ SeenCustomStatusCallout bool `json:"seen_custom_status_callout,omitempty"`
+ SeenCustomStatusExpirationBadge bool `json:"seen_custom_status_expiration_badge,omitempty"`
+ UsedCustomStatusKbShortcut bool `json:"used_custom_status_kb_shortcut,omitempty"`
+ SeenGuestAdminSlackbotAnnouncement bool `json:"seen_guest_admin_slackbot_announcement,omitempty"`
+ SeenThreadsNotificationBanner bool `json:"seen_threads_notification_banner,omitempty"`
+ SeenNameTaggingCoachmark bool `json:"seen_name_tagging_coachmark,omitempty"`
+ AllUnreadsSortOrder string `json:"all_unreads_sort_order,omitempty"`
+ Locale string `json:"locale,omitempty"`
+ SeenIntlChannelNamesCoachmark bool `json:"seen_intl_channel_names_coachmark,omitempty"`
+ SeenP2LocaleChangeMessage int `json:"seen_p2_locale_change_message,omitempty"`
+ SeenLocaleChangeMessage int `json:"seen_locale_change_message,omitempty"`
+ SeenJapaneseLocaleChangeMessage bool `json:"seen_japanese_locale_change_message,omitempty"`
+ SeenSharedChannelsCoachmark bool `json:"seen_shared_channels_coachmark,omitempty"`
+ SeenSharedChannelsOptInChangeMessage bool `json:"seen_shared_channels_opt_in_change_message,omitempty"`
+ HasRecentlySharedaChannel bool `json:"has_recently_shared_a_channel,omitempty"`
+ SeenChannelBrowserAdminCoachmark bool `json:"seen_channel_browser_admin_coachmark,omitempty"`
+ SeenAdministrationMenu bool `json:"seen_administration_menu,omitempty"`
+ SeenDraftsSectionCoachmark bool `json:"seen_drafts_section_coachmark,omitempty"`
+ SeenEmojiUpdateOverlayCoachmark bool `json:"seen_emoji_update_overlay_coachmark,omitempty"`
+ SeenSonicDeluxeToast int `json:"seen_sonic_deluxe_toast,omitempty"`
+ SeenWysiwygDeluxeToast bool `json:"seen_wysiwyg_deluxe_toast,omitempty"`
+ SeenMarkdownPasteToast int `json:"seen_markdown_paste_toast,omitempty"`
+ SeenMarkdownPasteShortcut int `json:"seen_markdown_paste_shortcut,omitempty"`
+ SeenIaEducation bool `json:"seen_ia_education,omitempty"`
+ PlainTextMode bool `json:"plain_text_mode,omitempty"`
+ ShowSharedChannelsEducationBanner bool `json:"show_shared_channels_education_banner,omitempty"`
+ AllowCallsToSetCurrentStatus bool `json:"allow_calls_to_set_current_status,omitempty"`
+ InInteractiveMasMigrationFlow bool `json:"in_interactive_mas_migration_flow,omitempty"`
+ SunsetInteractiveMessageViews int `json:"sunset_interactive_message_views,omitempty"`
+ ShdepPromoCodeSubmitted bool `json:"shdep_promo_code_submitted,omitempty"`
+ SeenShdepSlackbotMessage bool `json:"seen_shdep_slackbot_message,omitempty"`
+ SeenCallsInteractiveCoachmark bool `json:"seen_calls_interactive_coachmark,omitempty"`
+ AllowCmdTabIss bool `json:"allow_cmd_tab_iss,omitempty"`
+ SeenWorkflowBuilderDeluxeToast bool `json:"seen_workflow_builder_deluxe_toast,omitempty"`
+ WorkflowBuilderIntroModalClickedThrough bool `json:"workflow_builder_intro_modal_clicked_through,omitempty"`
+ // WorkflowBuilderCoachmarks unknownType `json:"workflow_builder_coachmarks,omitempty"`
+ SeenGdriveCoachmark bool `json:"seen_gdrive_coachmark,omitempty"`
+ OverloadedMessageEnabled bool `json:"overloaded_message_enabled,omitempty"`
+ SeenHighlightsCoachmark bool `json:"seen_highlights_coachmark,omitempty"`
+ SeenHighlightsArrowsCoachmark bool `json:"seen_highlights_arrows_coachmark,omitempty"`
+ SeenHighlightsWarmWelcome bool `json:"seen_highlights_warm_welcome,omitempty"`
+ SeenNewSearchUi bool `json:"seen_new_search_ui,omitempty"`
+ SeenChannelSearch bool `json:"seen_channel_search,omitempty"`
+ SeenPeopleSearch bool `json:"seen_people_search,omitempty"`
+ SeenPeopleSearchCount int `json:"seen_people_search_count,omitempty"`
+ DismissedScrollSearchTooltipCount int `json:"dismissed_scroll_search_tooltip_count,omitempty"`
+ LastDismissedScrollSearchTooltipTimestamp int `json:"last_dismissed_scroll_search_tooltip_timestamp,omitempty"`
+ HasUsedQuickswitcherShortcut bool `json:"has_used_quickswitcher_shortcut,omitempty"`
+ SeenQuickswitcherShortcutTipCount int `json:"seen_quickswitcher_shortcut_tip_count,omitempty"`
+ BrowsersDismissedChannelsLowResultsEducation bool `json:"browsers_dismissed_channels_low_results_education,omitempty"`
+ BrowsersSeenInitialChannelsEducation bool `json:"browsers_seen_initial_channels_education,omitempty"`
+ BrowsersDismissedPeopleLowResultsEducation bool `json:"browsers_dismissed_people_low_results_education,omitempty"`
+ BrowsersSeenInitialPeopleEducation bool `json:"browsers_seen_initial_people_education,omitempty"`
+ BrowsersDismissedUserGroupsLowResultsEducation bool `json:"browsers_dismissed_user_groups_low_results_education,omitempty"`
+ BrowsersSeenInitialUserGroupsEducation bool `json:"browsers_seen_initial_user_groups_education,omitempty"`
+ BrowsersDismissedFilesLowResultsEducation bool `json:"browsers_dismissed_files_low_results_education,omitempty"`
+ BrowsersSeenInitialFilesEducation bool `json:"browsers_seen_initial_files_education,omitempty"`
+ A11yAnimations bool `json:"a11y_animations,omitempty"`
+ SeenKeyboardShortcutsCoachmark bool `json:"seen_keyboard_shortcuts_coachmark,omitempty"`
+ NeedsInitialPasswordSet bool `json:"needs_initial_password_set,omitempty"`
+ LessonsEnabled bool `json:"lessons_enabled,omitempty"`
+ TractorEnabled bool `json:"tractor_enabled,omitempty"`
+ TractorExperimentGroup string `json:"tractor_experiment_group,omitempty"`
+ OpenedSlackbotDm bool `json:"opened_slackbot_dm,omitempty"`
+ NewxpSuggestedChannels string `json:"newxp_suggested_channels,omitempty"`
+ OnboardingComplete bool `json:"onboarding_complete,omitempty"`
+ WelcomePlaceState string `json:"welcome_place_state,omitempty"`
+ // OnboardingRoleApps unknownType `json:"onboarding_role_apps,omitempty"`
+ HasReceivedThreadedMessage bool `json:"has_received_threaded_message,omitempty"`
+ SendYourFirstMessageBannerEnabled bool `json:"send_your_first_message_banner_enabled,omitempty"`
+ WhocanseethisDmMpdmBadge bool `json:"whocanseethis_dm_mpdm_badge,omitempty"`
+ HighlightWords string `json:"highlight_words,omitempty"`
+ ThreadsEverything bool `json:"threads_everything,omitempty"`
+ NoTextInNotifications bool `json:"no_text_in_notifications,omitempty"`
+ PushShowPreview bool `json:"push_show_preview,omitempty"`
+ GrowlsEnabled bool `json:"growls_enabled,omitempty"`
+ AllChannelsLoud bool `json:"all_channels_loud,omitempty"`
+ PushDmAlert bool `json:"push_dm_alert,omitempty"`
+ PushMentionAlert bool `json:"push_mention_alert,omitempty"`
+ PushEverything bool `json:"push_everything,omitempty"`
+ PushIdleWait int `json:"push_idle_wait,omitempty"`
+ PushSound string `json:"push_sound,omitempty"`
+ NewMsgSnd string `json:"new_msg_snd,omitempty"`
+ PushLoudChannels string `json:"push_loud_channels,omitempty"`
+ PushMentionChannels string `json:"push_mention_channels,omitempty"`
+ PushLoudChannelsSet string `json:"push_loud_channels_set,omitempty"`
+ LoudChannels string `json:"loud_channels,omitempty"`
+ NeverChannels string `json:"never_channels,omitempty"`
+ LoudChannelsSet string `json:"loud_channels_set,omitempty"`
+ AtChannelSuppressedChannels string `json:"at_channel_suppressed_channels,omitempty"`
+ PushAtChannelSuppressedChannels string `json:"push_at_channel_suppressed_channels,omitempty"`
+ MutedChannels string `json:"muted_channels,omitempty"`
+ // AllNotificationsPrefs unknownType `json:"all_notifications_prefs,omitempty"`
+ GrowthMsgLimitApproachingCtaCount int `json:"growth_msg_limit_approaching_cta_count,omitempty"`
+ GrowthMsgLimitApproachingCtaTs int `json:"growth_msg_limit_approaching_cta_ts,omitempty"`
+ GrowthMsgLimitReachedCtaCount int `json:"growth_msg_limit_reached_cta_count,omitempty"`
+ GrowthMsgLimitReachedCtaLastTs int `json:"growth_msg_limit_reached_cta_last_ts,omitempty"`
+ GrowthMsgLimitLongReachedCtaCount int `json:"growth_msg_limit_long_reached_cta_count,omitempty"`
+ GrowthMsgLimitLongReachedCtaLastTs int `json:"growth_msg_limit_long_reached_cta_last_ts,omitempty"`
+ GrowthMsgLimitSixtyDayBannerCtaCount int `json:"growth_msg_limit_sixty_day_banner_cta_count,omitempty"`
+ GrowthMsgLimitSixtyDayBannerCtaLastTs int `json:"growth_msg_limit_sixty_day_banner_cta_last_ts,omitempty"`
+ // GrowthAllBannersPrefs unknownType `json:"growth_all_banners_prefs,omitempty"`
+ AnalyticsUpsellCoachmarkSeen bool `json:"analytics_upsell_coachmark_seen,omitempty"`
+ SeenAppSpaceCoachmark bool `json:"seen_app_space_coachmark,omitempty"`
+ SeenAppSpaceTutorial bool `json:"seen_app_space_tutorial,omitempty"`
+ DismissedAppLauncherWelcome bool `json:"dismissed_app_launcher_welcome,omitempty"`
+ DismissedAppLauncherLimit bool `json:"dismissed_app_launcher_limit,omitempty"`
+ Purchaser bool `json:"purchaser,omitempty"`
+ ShowEntOnboarding bool `json:"show_ent_onboarding,omitempty"`
+ FoldersEnabled bool `json:"folders_enabled,omitempty"`
+ // FolderData unknownType `json:"folder_data,omitempty"`
+ SeenCorporateExportAlert bool `json:"seen_corporate_export_alert,omitempty"`
+ ShowAutocompleteHelp int `json:"show_autocomplete_help,omitempty"`
+ DeprecationToastLastSeen int `json:"deprecation_toast_last_seen,omitempty"`
+ DeprecationModalLastSeen int `json:"deprecation_modal_last_seen,omitempty"`
+ Iap1Lab int `json:"iap1_lab,omitempty"`
+ IaTopNavTheme string `json:"ia_top_nav_theme,omitempty"`
+ IaPlatformActionsLab int `json:"ia_platform_actions_lab,omitempty"`
+ ActivityView string `json:"activity_view,omitempty"`
+ FailoverProxyCheckCompleted int `json:"failover_proxy_check_completed,omitempty"`
+ EdgeUploadProxyCheckCompleted int `json:"edge_upload_proxy_check_completed,omitempty"`
+ AppSubdomainCheckCompleted int `json:"app_subdomain_check_completed,omitempty"`
+ AddAppsPromptDismissed bool `json:"add_apps_prompt_dismissed,omitempty"`
+ AddChannelPromptDismissed bool `json:"add_channel_prompt_dismissed,omitempty"`
+ ChannelSidebarHideInvite bool `json:"channel_sidebar_hide_invite,omitempty"`
+ InProdSurveysEnabled bool `json:"in_prod_surveys_enabled,omitempty"`
+ DismissedInstalledAppDmSuggestions string `json:"dismissed_installed_app_dm_suggestions,omitempty"`
+ SeenContextualMessageShortcutsModal bool `json:"seen_contextual_message_shortcuts_modal,omitempty"`
+ SeenMessageNavigationEducationalToast bool `json:"seen_message_navigation_educational_toast,omitempty"`
+ ContextualMessageShortcutsModalWasSeen bool `json:"contextual_message_shortcuts_modal_was_seen,omitempty"`
+ MessageNavigationToastWasSeen bool `json:"message_navigation_toast_was_seen,omitempty"`
+ UpToBrowseKbShortcut bool `json:"up_to_browse_kb_shortcut,omitempty"`
+ ChannelSections string `json:"channel_sections,omitempty"`
+ TZ string `json:"tz,omitempty"`
+}
+
+func (api *Client) GetUserPrefs() (*UserPrefsCarrier, error) {
+ return api.GetUserPrefsContext(context.Background())
+}
+
+func (api *Client) GetUserPrefsContext(ctx context.Context) (*UserPrefsCarrier, error) {
+ response := UserPrefsCarrier{}
+
+ err := api.getMethod(ctx, "users.prefs.get", api.token, url.Values{}, &response)
+ if err != nil {
+ return nil, err
+ }
+
+ return &response, response.Err()
+}
+
+func (api *Client) MuteChat(channelID string) (*UserPrefsCarrier, error) {
+ prefs, err := api.GetUserPrefs()
+ if err != nil {
+ return nil, err
+ }
+ chnls := strings.Split(prefs.UserPrefs.MutedChannels, ",")
+ for _, chn := range chnls {
+ if chn == channelID {
+ return nil, nil // noop
+ }
+ }
+ newChnls := prefs.UserPrefs.MutedChannels + "," + channelID
+ values := url.Values{"token": {api.token}, "muted_channels": {newChnls}, "reason": {"update-muted-channels"}}
+ response := UserPrefsCarrier{}
+
+ err = api.postMethod(context.Background(), "users.prefs.set", values, &response)
+ if err != nil {
+ return nil, err
+ }
+
+ return &response, response.Err()
+}
+
+func (api *Client) UnMuteChat(channelID string) (*UserPrefsCarrier, error) {
+ prefs, err := api.GetUserPrefs()
+ if err != nil {
+ return nil, err
+ }
+ chnls := strings.Split(prefs.UserPrefs.MutedChannels, ",")
+ newChnls := make([]string, 0, len(chnls))
+ for _, chn := range chnls {
+ if chn == channelID {
+ continue // drop the channel being unmuted
+ }
+ newChnls = append(newChnls, chn)
+ }
+ if len(newChnls) == len(chnls) {
+ return nil, nil // channel was not muted; noop
+ }
+ values := url.Values{"token": {api.token}, "muted_channels": {strings.Join(newChnls, ",")}, "reason": {"update-muted-channels"}}
+ response := UserPrefsCarrier{}
+
+ err = api.postMethod(context.Background(), "users.prefs.set", values, &response)
+ if err != nil {
+ return nil, err
+ }
+
+ return &response, response.Err()
+}
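
Illustrative aside, not part of the vendored patch: a minimal sketch of how a caller might drive the preference helpers above, assuming a user-level token and a made-up channel ID (the users.prefs.* endpoints generally require a user token).

package main

import (
    "log"

    "github.com/slack-go/slack"
)

func main() {
    // Placeholder token and channel ID; substitute real values.
    api := slack.New("xoxp-user-token")

    if _, err := api.MuteChat("C0123456789"); err != nil {
        log.Fatalf("mute failed: %v", err)
    }

    prefs, err := api.GetUserPrefs()
    if err != nil {
        log.Fatalf("get prefs failed: %v", err)
    }
    log.Printf("muted channels: %s", prefs.UserPrefs.MutedChannels)

    if _, err := api.UnMuteChat("C0123456789"); err != nil {
        log.Fatalf("unmute failed: %v", err)
    }
}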
+
+// UserDetails contains user details coming in the initial response from StartRTM
+type UserDetails struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Created JSONTime `json:"created"`
+ ManualPresence string `json:"manual_presence"`
+ Prefs UserPrefs `json:"prefs"`
+}
+
+// JSONTime exists so that we can have a String method converting the date
+type JSONTime int64
+
+// String converts the unix timestamp into a string
+func (t JSONTime) String() string {
+ tm := t.Time()
+ return fmt.Sprintf("\"%s\"", tm.Format("Mon Jan _2"))
+}
+
+// Time returns a `time.Time` representation of this value.
+func (t JSONTime) Time() time.Time {
+ return time.Unix(int64(t), 0)
+}
+
+// UnmarshalJSON will unmarshal both string and int JSON values
+func (t *JSONTime) UnmarshalJSON(buf []byte) error {
+ s := bytes.Trim(buf, `"`)
+
+ if bytes.EqualFold(s, []byte("null")) {
+ *t = JSONTime(0)
+ return nil
+ }
+
+ v, err := strconv.Atoi(string(s))
+ if err != nil {
+ return err
+ }
+
+ *t = JSONTime(int64(v))
+ return nil
+}
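
Illustrative aside, not part of the vendored patch: a quick sketch of the behaviour described above, showing that JSONTime decodes both a bare and a quoted Unix timestamp to the same value.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/slack-go/slack"
)

func main() {
    var bare, quoted slack.JSONTime
    // UnmarshalJSON trims surrounding quotes before parsing, so both forms work.
    _ = json.Unmarshal([]byte(`1672531200`), &bare)
    _ = json.Unmarshal([]byte(`"1672531200"`), &quoted)
    fmt.Println(bare.Time().UTC(), bare == quoted) // 2023-01-01 00:00:00 +0000 UTC true
}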
+
+// Team contains details about a team
+type Team struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Domain string `json:"domain"`
+}
+
+// Icons XXX: needs further investigation
+type Icons struct {
+ Image36 string `json:"image_36,omitempty"`
+ Image48 string `json:"image_48,omitempty"`
+ Image72 string `json:"image_72,omitempty"`
+}
+
+// Info contains various details about the authenticated user and team.
+// It is returned by StartRTM or included in the "ConnectedEvent" RTM event.
+type Info struct {
+ URL string `json:"url,omitempty"`
+ User *UserDetails `json:"self,omitempty"`
+ Team *Team `json:"team,omitempty"`
+}
+
+type infoResponseFull struct {
+ Info
+ SlackResponse
+}
+
+// GetBotByID is deprecated and returns nil
+func (info Info) GetBotByID(botID string) *Bot {
+ return nil
+}
+
+// GetUserByID is deprecated and returns nil
+func (info Info) GetUserByID(userID string) *User {
+ return nil
+}
+
+// GetChannelByID is deprecated and returns nil
+func (info Info) GetChannelByID(channelID string) *Channel {
+ return nil
+}
+
+// GetGroupByID is deprecated and returns nil
+func (info Info) GetGroupByID(groupID string) *Group {
+ return nil
+}
+
+// GetIMByID is deprecated and returns nil
+func (info Info) GetIMByID(imID string) *IM {
+ return nil
+}
diff --git a/vendor/github.com/slack-go/slack/interactions.go b/vendor/github.com/slack-go/slack/interactions.go
new file mode 100644
index 0000000..e362caa
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/interactions.go
@@ -0,0 +1,238 @@
+package slack
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// InteractionType represents the type of an interaction.
+type InteractionType string
+
+// ActionType type represents the type of action (attachment, block, etc.)
+type ActionType string
+
+// action is an interface that should be implemented by all callback action types
+type action interface {
+ actionType() ActionType
+}
+
+// Types of interactions that can be received.
+const (
+ InteractionTypeDialogCancellation = InteractionType("dialog_cancellation")
+ InteractionTypeDialogSubmission = InteractionType("dialog_submission")
+ InteractionTypeDialogSuggestion = InteractionType("dialog_suggestion")
+ InteractionTypeInteractionMessage = InteractionType("interactive_message")
+ InteractionTypeMessageAction = InteractionType("message_action")
+ InteractionTypeBlockActions = InteractionType("block_actions")
+ InteractionTypeBlockSuggestion = InteractionType("block_suggestion")
+ InteractionTypeViewSubmission = InteractionType("view_submission")
+ InteractionTypeViewClosed = InteractionType("view_closed")
+ InteractionTypeShortcut = InteractionType("shortcut")
+ InteractionTypeWorkflowStepEdit = InteractionType("workflow_step_edit")
+)
+
+// InteractionCallback is sent from Slack when a user interacts with a button or dialog.
+type InteractionCallback struct {
+ Type InteractionType `json:"type"`
+ Token string `json:"token"`
+ CallbackID string `json:"callback_id"`
+ ResponseURL string `json:"response_url"`
+ TriggerID string `json:"trigger_id"`
+ ActionTs string `json:"action_ts"`
+ Team Team `json:"team"`
+ Channel Channel `json:"channel"`
+ User User `json:"user"`
+ OriginalMessage Message `json:"original_message"`
+ Message Message `json:"message"`
+ Name string `json:"name"`
+ Value string `json:"value"`
+ MessageTs string `json:"message_ts"`
+ AttachmentID string `json:"attachment_id"`
+ ActionCallback ActionCallbacks `json:"actions"`
+ View View `json:"view"`
+ ActionID string `json:"action_id"`
+ APIAppID string `json:"api_app_id"`
+ BlockID string `json:"block_id"`
+ Container Container `json:"container"`
+ Enterprise Enterprise `json:"enterprise"`
+ WorkflowStep InteractionWorkflowStep `json:"workflow_step"`
+ DialogSubmissionCallback
+ ViewSubmissionCallback
+ ViewClosedCallback
+
+ // FIXME(kanata2): just workaround for backward-compatibility.
+ // See also https://github.com/slack-go/slack/issues/816
+ RawState json.RawMessage `json:"state,omitempty"`
+
+ // BlockActionState stands for the `state` field in block_actions type.
+ // NOTE: InteractionCallback.State has a role for the state of dialog_submission type,
+ // so we cannot use this field for backward-compatibility for now.
+ BlockActionState *BlockActionStates `json:"-"`
+}
+
+type BlockActionStates struct {
+ Values map[string]map[string]BlockAction `json:"values"`
+}
+
+func (ic *InteractionCallback) MarshalJSON() ([]byte, error) {
+ type alias InteractionCallback
+ tmp := alias(*ic)
+ if tmp.Type == InteractionTypeBlockActions {
+ if tmp.BlockActionState == nil {
+ tmp.RawState = []byte(`{}`)
+ } else {
+ state, err := json.Marshal(tmp.BlockActionState.Values)
+ if err != nil {
+ return nil, err
+ }
+ tmp.RawState = []byte(`{"values":` + string(state) + `}`)
+ }
+ } else if ic.Type == InteractionTypeDialogSubmission {
+ tmp.RawState = []byte(tmp.State)
+ }
+ // Use pointer for go1.7
+ return json.Marshal(&tmp)
+}
+
+func (ic *InteractionCallback) UnmarshalJSON(b []byte) error {
+ type alias InteractionCallback
+ tmp := struct {
+ Type InteractionType `json:"type"`
+ *alias
+ }{
+ alias: (*alias)(ic),
+ }
+ if err := json.Unmarshal(b, &tmp); err != nil {
+ return err
+ }
+ *ic = InteractionCallback(*tmp.alias)
+ ic.Type = tmp.Type
+ if ic.Type == InteractionTypeBlockActions {
+ if len(ic.RawState) > 0 {
+ err := json.Unmarshal(ic.RawState, &ic.BlockActionState)
+ if err != nil {
+ return err
+ }
+ }
+ } else if ic.Type == InteractionTypeDialogSubmission {
+ ic.State = string(ic.RawState)
+ }
+ return nil
+}
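
Illustrative aside, not part of the vendored patch: a minimal sketch of the block_actions state handling described above, using an invented, heavily trimmed payload and assuming the BlockAction type defined elsewhere in this package exposes a Value field.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/slack-go/slack"
)

func main() {
    // Invented payload; real block_actions callbacks carry many more fields.
    payload := []byte(`{
        "type": "block_actions",
        "state": {"values": {"blk": {"act": {"type": "plain_text_input", "value": "hello"}}}}
    }`)

    var cb slack.InteractionCallback
    if err := json.Unmarshal(payload, &cb); err != nil {
        panic(err)
    }
    // For block_actions, UnmarshalJSON re-decodes the raw "state" field into BlockActionState.
    fmt.Println(cb.BlockActionState.Values["blk"]["act"].Value) // hello
}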
+
+type Container struct {
+ Type string `json:"type"`
+ ViewID string `json:"view_id"`
+ MessageTs string `json:"message_ts"`
+ ThreadTs string `json:"thread_ts,omitempty"`
+ AttachmentID json.Number `json:"attachment_id"`
+ ChannelID string `json:"channel_id"`
+ IsEphemeral bool `json:"is_ephemeral"`
+ IsAppUnfurl bool `json:"is_app_unfurl"`
+}
+
+type Enterprise struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+}
+
+type InteractionWorkflowStep struct {
+ WorkflowStepEditID string `json:"workflow_step_edit_id,omitempty"`
+ WorkflowID string `json:"workflow_id"`
+ StepID string `json:"step_id"`
+ Inputs *WorkflowStepInputs `json:"inputs,omitempty"`
+ Outputs *[]WorkflowStepOutput `json:"outputs,omitempty"`
+}
+
+// ActionCallbacks is a convenience struct defined to allow dynamic unmarshalling of
+// the "actions" value in Slack's JSON response, which varies depending on block type.
+type ActionCallbacks struct {
+ AttachmentActions []*AttachmentAction
+ BlockActions []*BlockAction
+}
+
+// MarshalJSON implements the json.Marshaler interface in order to combine both
+// action callback types back into a single array, the same shape the API responds with.
+// This keeps marshalling and unmarshalling of an InteractionCallback symmetrical.
+func (a ActionCallbacks) MarshalJSON() ([]byte, error) {
+ count := 0
+ length := len(a.AttachmentActions) + len(a.BlockActions)
+ buffer := bytes.NewBufferString("[")
+
+ f := func(obj interface{}) error {
+ js, err := json.Marshal(obj)
+ if err != nil {
+ return err
+ }
+ _, err = buffer.Write(js)
+ if err != nil {
+ return err
+ }
+
+ count++
+ if count < length {
+ _, err = buffer.WriteString(",")
+ return err
+ }
+ return nil
+ }
+
+ for _, act := range a.AttachmentActions {
+ err := f(act)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, blk := range a.BlockActions {
+ err := f(blk)
+ if err != nil {
+ return nil, err
+ }
+ }
+ buffer.WriteString("]")
+ return buffer.Bytes(), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface in order to delegate
+// unmarshalling and allow for proper type assertion when decoding the response.
+func (a *ActionCallbacks) UnmarshalJSON(data []byte) error {
+ var raw []json.RawMessage
+ err := json.Unmarshal(data, &raw)
+ if err != nil {
+ return err
+ }
+
+ for _, r := range raw {
+ var obj map[string]interface{}
+ err := json.Unmarshal(r, &obj)
+ if err != nil {
+ return err
+ }
+
+ if _, ok := obj["block_id"].(string); ok {
+ action, err := unmarshalAction(r, &BlockAction{})
+ if err != nil {
+ return err
+ }
+
+ a.BlockActions = append(a.BlockActions, action.(*BlockAction))
+ continue
+ }
+
+ action, err := unmarshalAction(r, &AttachmentAction{})
+ if err != nil {
+ return err
+ }
+ a.AttachmentActions = append(a.AttachmentActions, action.(*AttachmentAction))
+ }
+
+ return nil
+}
+
+func unmarshalAction(r json.RawMessage, callbackAction action) (action, error) {
+ err := json.Unmarshal(r, callbackAction)
+ if err != nil {
+ return nil, err
+ }
+ return callbackAction, nil
+}
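
Illustrative aside, not part of the vendored patch: a sketch of the mixed-type decoding performed by ActionCallbacks.UnmarshalJSON, using an invented actions array.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/slack-go/slack"
)

func main() {
    // Invented array mixing a legacy attachment action and a block action.
    raw := []byte(`[
        {"name": "approve", "type": "button", "value": "yes"},
        {"action_id": "approve", "block_id": "b1", "type": "button", "value": "yes"}
    ]`)

    var acts slack.ActionCallbacks
    if err := json.Unmarshal(raw, &acts); err != nil {
        panic(err)
    }
    // Entries carrying a "block_id" land in BlockActions; everything else in AttachmentActions.
    fmt.Println(len(acts.AttachmentActions), len(acts.BlockActions)) // 1 1

    // MarshalJSON folds both slices back into a single array.
    combined, _ := json.Marshal(acts)
    fmt.Println(string(combined))
}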
diff --git a/vendor/github.com/slack-go/slack/internal/backoff/backoff.go b/vendor/github.com/slack-go/slack/internal/backoff/backoff.go
new file mode 100644
index 0000000..833e9f2
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/internal/backoff/backoff.go
@@ -0,0 +1,62 @@
+package backoff
+
+import (
+ "math/rand"
+ "time"
+)
+
+// Adapted from https://github.com/jpillora/backoff/blob/master/backoff.go
+
+// Backoff is a time.Duration counter. Each call to Duration() returns the
+// Initial value doubled once per prior attempt, plus an optional random
+// Jitter; Max is used as a fallback once the doubling overflows. Reset()
+// returns the counter to zero. Used in conjunction with the time package.
+type Backoff struct {
+ attempts int
+ // Initial is the starting delay that subsequent attempts scale from
+ Initial time.Duration
+ // Jitter value randomizes an additional delay between 0 and Jitter
+ Jitter time.Duration
+ // Max is the maximum value of the backoff
+ Max time.Duration
+}
+
+// Duration returns the delay for the current attempt and then advances the
+// attempt counter.
+func (b *Backoff) Duration() (dur time.Duration) {
+ // Zero-values are nonsensical, so we use
+ // them to apply defaults
+ if b.Max == 0 {
+ b.Max = 10 * time.Second
+ }
+
+ if b.Initial == 0 {
+ b.Initial = 100 * time.Millisecond
+ }
+
+ // calculate this duration
+ if dur = time.Duration(1 << uint(b.attempts)); dur > 0 {
+ dur = dur * b.Initial
+ } else {
+ dur = b.Max
+ }
+
+ if b.Jitter > 0 {
+ dur = dur + time.Duration(rand.Intn(int(b.Jitter)))
+ }
+
+ // bump attempts count
+ b.attempts++
+
+ return dur
+}
+
+// Reset sets the attempt counter back to zero.
+func (b *Backoff) Reset() {
+ b.attempts = 0
+}
+
+// Attempts returns the number of attempts made so far.
+func (b *Backoff) Attempts() int {
+ return b.attempts
+}
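
Illustrative aside, not part of the vendored patch: a hypothetical in-package test (the backoff package is internal, so it cannot be imported from outside this module) showing the doubling behaviour of Duration and the effect of Reset.

package backoff

import (
    "testing"
    "time"
)

func TestDurationDoubles(t *testing.T) {
    // With Jitter unset, each call returns exactly double the previous delay.
    b := &Backoff{Initial: 100 * time.Millisecond}
    want := []time.Duration{
        100 * time.Millisecond,
        200 * time.Millisecond,
        400 * time.Millisecond,
        800 * time.Millisecond,
    }
    for i, w := range want {
        if got := b.Duration(); got != w {
            t.Fatalf("attempt %d: got %v, want %v", i, got, w)
        }
    }
    // Reset starts the sequence over from Initial.
    b.Reset()
    if b.Attempts() != 0 {
        t.Fatal("Reset should zero the attempt counter")
    }
}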
diff --git a/vendor/github.com/slack-go/slack/internal/errorsx/errorsx.go b/vendor/github.com/slack-go/slack/internal/errorsx/errorsx.go
new file mode 100644
index 0000000..0182ec6
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/internal/errorsx/errorsx.go
@@ -0,0 +1,17 @@
+package errorsx
+
+// String representing an error, useful for declaring string constants as errors.
+type String string
+
+func (t String) Error() string {
+ return string(t)
+}
+
+// Is reports whether String matches with the target error
+func (t String) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ return t.Error() == target.Error()
+}
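
Illustrative aside, not part of the vendored patch: a hypothetical in-package test showing how a String constant can serve as a sentinel error and be matched with errors.Is.

package errorsx

import (
    "errors"
    "fmt"
    "testing"
)

// ErrNotReady is a made-up sentinel used only for this sketch.
const ErrNotReady = String("not ready")

func TestStringSentinel(t *testing.T) {
    wrapped := fmt.Errorf("startup: %w", ErrNotReady)
    if !errors.Is(wrapped, ErrNotReady) {
        t.Fatal("expected errors.Is to match the wrapped sentinel")
    }
}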
diff --git a/vendor/github.com/slack-go/slack/internal/timex/timex.go b/vendor/github.com/slack-go/slack/internal/timex/timex.go
new file mode 100644
index 0000000..40063f7
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/internal/timex/timex.go
@@ -0,0 +1,18 @@
+package timex
+
+import "time"
+
+// Max returns the maximum duration
+func Max(values ...time.Duration) time.Duration {
+ var (
+ max time.Duration
+ )
+
+ for _, v := range values {
+ if v > max {
+ max = v
+ }
+ }
+
+ return max
+}
diff --git a/vendor/github.com/slack-go/slack/item.go b/vendor/github.com/slack-go/slack/item.go
new file mode 100644
index 0000000..89af4eb
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/item.go
@@ -0,0 +1,75 @@
+package slack
+
+const (
+ TYPE_MESSAGE = "message"
+ TYPE_FILE = "file"
+ TYPE_FILE_COMMENT = "file_comment"
+ TYPE_CHANNEL = "channel"
+ TYPE_IM = "im"
+ TYPE_GROUP = "group"
+)
+
+// Item is any type of slack message - message, file, or file comment.
+type Item struct {
+ Type string `json:"type"`
+ Channel string `json:"channel,omitempty"`
+ Message *Message `json:"message,omitempty"`
+ File *File `json:"file,omitempty"`
+ Comment *Comment `json:"comment,omitempty"`
+ Timestamp string `json:"ts,omitempty"`
+}
+
+// NewMessageItem turns a message on a channel into a typed message struct.
+func NewMessageItem(ch string, m *Message) Item {
+ return Item{Type: TYPE_MESSAGE, Channel: ch, Message: m}
+}
+
+// NewFileItem turns a file into a typed file struct.
+func NewFileItem(f *File) Item {
+ return Item{Type: TYPE_FILE, File: f}
+}
+
+// NewFileCommentItem turns a file and comment into a typed file_comment struct.
+func NewFileCommentItem(f *File, c *Comment) Item {
+ return Item{Type: TYPE_FILE_COMMENT, File: f, Comment: c}
+}
+
+// NewChannelItem turns a channel id into a typed channel struct.
+func NewChannelItem(ch string) Item {
+ return Item{Type: TYPE_CHANNEL, Channel: ch}
+}
+
+// NewIMItem turns a channel id into a typed im struct.
+func NewIMItem(ch string) Item {
+ return Item{Type: TYPE_IM, Channel: ch}
+}
+
+// NewGroupItem turns a channel id into a typed group struct.
+func NewGroupItem(ch string) Item {
+ return Item{Type: TYPE_GROUP, Channel: ch}
+}
+
+// ItemRef is a reference to a message of any type. One of File,
+// Comment, or the combination of Channel and Timestamp must be
+// specified.
+type ItemRef struct {
+ Channel string `json:"channel"`
+ Timestamp string `json:"timestamp"`
+ File string `json:"file"`
+ Comment string `json:"file_comment"`
+}
+
+// NewRefToMessage initializes a reference to a message.
+func NewRefToMessage(channel, timestamp string) ItemRef {
+ return ItemRef{Channel: channel, Timestamp: timestamp}
+}
+
+// NewRefToFile initializes a reference to a file.
+func NewRefToFile(file string) ItemRef {
+ return ItemRef{File: file}
+}
+
+// NewRefToComment initializes a reference to a file comment.
+func NewRefToComment(comment string) ItemRef {
+ return ItemRef{Comment: comment}
+}
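
Illustrative aside, not part of the vendored patch: ItemRef values built with these constructors are what the reaction and pin APIs defined elsewhere in this package accept; a minimal sketch with placeholder IDs.

package main

import (
    "log"

    "github.com/slack-go/slack"
)

func main() {
    // Placeholder token, channel ID and message timestamp.
    api := slack.New("xoxb-bot-token")
    ref := slack.NewRefToMessage("C0123456789", "1672531200.000100")
    if err := api.AddReaction("thumbsup", ref); err != nil {
        log.Fatalf("add reaction failed: %v", err)
    }
}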
diff --git a/vendor/github.com/slack-go/slack/logger.go b/vendor/github.com/slack-go/slack/logger.go
new file mode 100644
index 0000000..90cb3ca
--- /dev/null
+++ b/vendor/github.com/slack-go/slack/logger.go
@@ -0,0 +1,60 @@
+package slack
+
+import (
+ "fmt"
+)
+
+// logger is a logger interface compatible with both stdlib and some
+// 3rd party loggers.
+type logger interface {
+ Output(int, string) error
+}
+
+// ilogger represents the internal logging api we use.
+type ilogger interface {
+ logger
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+}
+
+type Debug interface {
+ Debug() bool
+
+ // Debugf prints a formatted debug line.
+ Debugf(format string, v ...interface{})
+ // Debugln prints a debug line.
+ Debugln(v ...interface{})
+}
+
+// internalLog implements the additional methods used by our internal logging.
+type internalLog struct {
+ logger
+}
+
+// Println replicates the behaviour of the standard logger.
+func (t internalLog) Println(v ...interface{}) {
+ t.Output(2, fmt.Sprintln(v...))
+}
+
+// Printf replicates the behaviour of the standard logger.
+func (t internalLog) Printf(format string, v ...interface{}) {
+ t.Output(2, fmt.Sprintf(format, v...))
+}
+
+// Print replicates the behaviour of the standard logger.
+func (t internalLog) Print(v ...interface{}) {
+ t.Output(2, fmt.Sprint(v...))
+}
+
+type discard struct{}
+
+func (t discard) Debug() bool {
+ return false
+}
+
+// Debugf prints a formatted debug line.
+func (t discard) Debugf(format string, v ...interface{}) {}
+
+// Debugln prints a debug line.
+func (t discard) Debugln(v ...interface{}) {}
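
Illustrative aside, not part of the vendored patch: internalLog adapts anything exposing an Output(int, string) method, such as *log.Logger, to the Print-style helpers; a hypothetical in-package test of that wiring.

package slack

import (
    "bytes"
    "log"
    "testing"
)

func TestInternalLogAdapter(t *testing.T) {
    var buf bytes.Buffer
    il := internalLog{logger: log.New(&buf, "slack: ", 0)}
    il.Printf("connected as %s", "bot")
    if got, want := buf.String(), "slack: connected as bot\n"; got != want {
        t.Fatalf("got %q, want %q", got, want)
    }
}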
diff --git a/vendor/github.com/slack-go/slack/logo.png b/vendor/github.com/slack-go/slack/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..9bd143459bfe55bb6e01e721623c51a367be7190
GIT binary patch
literal 52440
zcma%i1yEewwr1n*8l1-6-8Hy2t_?KqPH=a3cWYb&gx~~12yVfGBtY=s8ixPB_uYHn
z%&VEH>Z(3_?QeZc_FB76opW}qhT2;UR1#DG0Dz&WAgcucz`S0`dYytox
z9`8Wm8YL8gbLzoXAJ=Ot`-*BWswO@Ilg!he1xAGb{8zVJlSG<
zF)4pr!L4Nw*z*CDnP%bYlNK_F4zF+h(*Gi|XZG@t9ZxNbr(N6q)A636b0uIOYf=2=
z_;LQ5Z*bp>TT?M*6RDAD@YNH=otUUg!qda*w%7F8$@Rli&+6sFQI&OH-_|CPJLPRh
z<7M^L{+UO=DP?g}{&i1ElLY&Z;_qSp8-8NJW=&`aFTJNbN79eaHKEzTer4BOpT
zMGiu*=OuH-zu6M8h^g8$2)Nfm27BR3E
zH@&~UX7Fd&=KH+uhaE+j_bmSS)61s*a`!sVtm(sEpV>j9Ntee$@5%Smr+1*EwK436
zN5t*R%V#m()2H8`e1l7QU-C`9-P}JsI$1vM7mgiaLN0ouQi`M|Z>`pDo@#qppCRH^
z-O~d4&5!bz5~jg{gNMH*7>wvkgbr^6;yc4aU9v=e_TAvECGKB8T}n^eUJ3;djU>X|
zo7Mybogbn7yuVzIoNC;^?s*B=7`=@g8Y6$YX@>{IP`ER885Wp1!|8dcRMkh2HBtjrC=78l#aZ;A7ExO%p>;>d8@J%Vki3
zcXmI97lEmIzrOcIujb1`SaH$vrCAe_KDa&j4JV=jS^Uig!PDccuh^eI?;f0gYi5kW
z<3MdX`dRr-?*<}2=>7@%C~=l$ynZ|V!AB4}IU2Z+RMGi4a_T)tZ`4v(XnenMjmYNR
zc_H`2yQ1;T{z5-af}=;gj~|1#w12H(ufySjBggQ9gfCBTjwYU%6$hbja==j+tQOfp
z)_*)MYVa+9Kj6P+*-|(8kG_BO?=4O?l-AS=+%!H#mX4rTD;0=c%!JhU5{AY9+0G1R
z4;ZHRoL&>0H>p;B_HDTF&`C5+4oBuI?B(dE%LvMX73&`v)lzT6~+{QN=fVX
ziMv6W`XioS6dF@Ccyaqq#uvMBqnb4@P6X3rbF&zW;O2PVh;JsFY=Fst)7d7vQ{pqXv0m9Zl-;~VQbae(h7bd2R!%`yX+y(B~6G5nG{rh
zdtXQI3zwcQu!duN2JT1iUcP?+>VtSOD*H)*G})|vDfr_J8))R2-tZ5kZ9BfXAN}GI
zt+vhiKfqFBJ}YU+6?UL3{W4=?PD=&JSutk2
z1$qpoW8en5gYV5TZ&iXiNFUET%vO%GX`f5)rzx-gun?IC%|Gee2j&h`Urd?xzg+kD
zB|cszx(wbu7NV42e-Z5exY;IA%zmK?E^a#%dVFJo#A-O%B=zkqYivKtdUCx9!qQU2
z)jo0d5Xrhby`g3vM4gD(PtUx-lR75%-q>gCR$9ZKGcF!BXF@L|?J)&@$>=gN7W;?3+ZirrQ
z$>_xLd_B&_O%?|h^#`}mK)Y{~D_H;}R5-a2n_nLWHr`CMfbG&DW+XOAAxQZs(LWAJ-D*
zCQ_UM6;l)_srN6#lcJ3LA<#xH}xyQcl`<06CP@8|iOcc$nER~PC
zZ^zSlp!-IT_30q~WSWQj0XN8$L@BH9a3HZ5lNfkAuuq4<(x6ygD14(vBrX9dB_?
zKhM|t4s4(6al66LD!Jpwg*BK%X>r3xQy&i46uA`TKttZ@HNP%$20WgRXJ03Ehse5b
ze(E2u^I-%Hws>qrd$D*Y&Yk2|X7AvxcR7k(vDdYU2e^#(Df56ANW3HK-KZVuFev32
z*pDaPWfNxS=hzy4Eci7>tb;eh_Fb_0xVB%XXr|r|DpIQ($UlNg9?pV`utfbdonB?R
z#kZRmnOTf&lGVX>s^Kl%>u#Rtk;4g7xC)p2CM*4!ZIhj+K-}7uhQD|XMISQ@&~4K<
z4Z^it!f5fF)Ly*HIi58nvuzH?H|@swLOMn-X7gPX@+BO}PHajBo|$JeB4sWkux$>W
zXxfu_OxPR?CfqtSr@V~IJTBDEw6>MCb(gRD%#JQ%swM#LzIt%mt?$!RHF{J(12Ui`
zuX!<1k2acOH&%C;_gJVl>i3T>?4cK7$`lQ~F+6JdL6C$X7hxd-P;>#43v
z?h7}6Yyo>C?jfV=`X-{-$p#1E(cD^~YHxX6aL0N{Y2#-?s%slUTI2T6o4|r7-^04c
zRur?bo6a_&no@GL$kTI;5{V^#y@3|Yw7Ff*Clq`oyP^&l3ek));qmvbGn#Ds2v63-
zg>ywM_`X`-EE6&6sj{K&Qh>ux(#kF@+h2oVjqCAQU_!1juTJqrrlzcnEYJq>mE#&A
zF(@8@u(vUM=xn;iI2Xl_`Zixje052+efMl|G%9uVwg-OzTg=7Twu>a$eb;hcxHLM0
zV1n-ik?}6A7FcgE#Rz!M23FmRkXggjBos6oU0@#D;MzzuuVB6u4tzJXbX;iP<(6gn
zl*|fGFQ93~y#Il1z9W`w@!ByirDTC_GmeWz9oGCS>eg>X30E{EWCr8b?H`ZmDzl^D
z(2dYYRST;V2uVQ4;7oGim&90(?K(--Y%74^hO^rc=@77n#TE4Uc(zx(d3SQ_29Vq;
zs7d2b@(>Lbuns68Oi$0puw`(B5tYoJNwqb{n(A<4+-r=61~!CmM9Z(h9pVN!vy9^&
z3mgg)i&qdR*W)(9rR0`g2<+WhSO5I%#as&KUZI+B!l%Obn6psGj&IBV~zYRsp#<0wS?~=WkOK+e)J17d57-h@cSo4uFB>FAK
z&K}*Cptn_6GC}(EsvsjgVN@3~9QI=2Q;eXEzzn*Saq|GKwH{10iW$rs{hs;3q6D`c
zTm4}598N(o*yc!bk-gutDzF0F#Bmaw3A-r`E!+EEjiF%N#&~9
zx;POXac5OgrZ;Qta2}{@KX->t?JOP02{R87dAkrVyKq5{0%e6n*7efOVG8F&?_<8X
z7KF#^uSho5f3xc4;X=r<25=gQE84Nq!==REY$8jeg^y0Qpk;CJXzIiqILjX?l_fB#
zJ0i+ON)?K#jmnCqZ@`#W>}qzhyn|E;m^11QtBQe;%$3)ESdtRYuga&WWuH$?+Uq2+
z?`Gc#`f9|;iz&|iWTv#HGyZ5bDOWalerIU4S8AL%26vc3OS1&KgqtKECa1H77X*hC
zMambZ%7IZmmHS18xYGvO^2dta{J?#{kC+(ny}4-Sgz$EUzOgtei*AZ3civkkuW18=
z@NI!|yt?}bJk=vulz|kK!FSwHT;P#@8`V34obL2dRI%;tG?|+3KNTj)CYa&-P(1i7
z1^b3X1
z6;VTMUN-5-JW<;tO!D`}B6oc>f9saGgk)KJnVhXo<{=ls*JP-kv!4Voa0@<+wq{Z-ZN2-PtMsS6Zn1=9u#o*)`
z{j$(I9eQL@H95RhN5J0QhKxz|#b)5X@)i*C1w0BNN)TF>Bbzn1Kp%xa3C~W5qwm~q
ztsV~aDZ&9b#qD|APag2Peu5pt8j6&xU|02qd6H!6jorzaAzQ$2_OY68Gg>EfsO6M>
z8v+iaglVmMsggZC3n93Si-@VyixSK@G+)|AA~j8TW*^kFeuI~7BcACRZJB_AiHP83
zOVC@bOy3n1F~epQ6G4IwYoW9iAIFp^8Fj&|1?P(vGsYADPIAe}%8n>GtctcvIyCDW
z1T&>_+KwQ=O}d>@0qMuE#TFSwVyY;RYiGDr@R%LJf*HMpDmw7zS|XD^Uu_5RRuQI$P~l{t}5Yd}lDB(^cqhy_%&dE)U=bK{1_x;hnl}H0sSi%2
z{F39axG8GfeCT*X$eSv|y~Ci#jj{++vs>APo?&E6$>f5J_emjnjtIn%uiq9n*%2y{>V3Gr-KVPy&os!;C
z+$wIhS;RF@y-HbB%29emEM!?Au-SE_SfJj-do*vy69@~7=t$JH|A~Q*Qw@%~qliES
zlMImLlyl73BJXR)1%=}q-9dmR(XhdK51WCpRRoVmFNannDH4!iJ@PU|D4pnFzmK`1{lw}r>*eU*
z4Bpf^-#+=;cCJy688luT`<_S^bCM*?ho5=WanD;A7*j)Jrs8ME5pzIcz+`~0f({J(
z4a0Y*Z^hg^5viZx9!E@le-JaJwz;67cn)e6%T7~?`fYy@{8FXorN
zKAW%e=y^xt79yA(BD0iAI{4LG5SXce($~@4-U-CTIRoQI0^kXNRz|t<+DUocqAa^*
ztQ3T=w<;XGh3=Xqj2RwrecZ_i&LIrcsCYa{p5M?LI%&HUG*PX)1i2)wl5bU{6KXz(
zd^7)m_B7kKG+5>neu82OJt>)hUB=-PU6ucZA%~$q54>tfdDpn4sKPOXtJ#Zf2aR6l
zELI_21&q?gPG7cCL5KlXu@|vfa48`?j_^IvH5QY)OGR7>pWi3lFcA2LQf)1T(%Q;p
zWiaMpxWhcB#}2jerM9(10}_vL@I#lr#5yj*4pf>OM%y+yQf3L|_$IVkTyX~9aXi6a
z$`}uz-1+zc1t?(_C?NO`-&0#yH1#t};1uQg_!K{NfiuD1(m1zHX`4s-ngbVbYPfzI
zNYY}{m1b1NdeBthJT#NAlJ+29*O7Bm{%(%c+Ak+&$Utb&XolkROy1u`{iY(Inb0y=
zW<~;>NIiw551_%k80;CwAVttu5IxWOe~RPIAP8+v|1fznAu&m&VgV7Rc0XzR(K!71
z5xoaJ+5ET9ytPe?2BNP^q(!UofG#!fIYBHEqZiE_ec7j(s5fDo^0k2yB|-H<@?WmP
ze%;|wvzHd|AR@#w!cC&Wqc2Q=`CH~H$=JEy3Z>MdYNd{gCTOL}IY)Yr8TOE(A%!Em
zl9!d3_i`kVbi-)woPDFplbQRY;-
zot?gP9ue-T$wO!Ngfj-+XjvP0w`3?0^0kt_vD;FWfe@F1vIJ6pIdDX_XeK&o-v>36
z=uu}cYoMS;{1nalV2z~77x;emV}t7i;q
zGQS&vogZz|png|o@fM4(SjHt%x*l9ne)&*U8{lLYKkq5zV^N>@@^oZ@k9*BE)?
zMR3xZB=x0^&eV{azG;})90naC_oggopCuZltbJHAkd?CrQ!gya(dGsqgk!3Jo@14E
zu*1$!%BpV&sc|EJ!l@l^L&tc}WP(yWA_->2fJoVLC+TI7DZfp}ycxxCp-4jsDgX@B
zLKB&WiyGzKlMk*t7^Psma3^kU2lO;VaTcp@lrs2g&v6w-RdGCN$+i-u;argUY7hkJ
zi@s{XkA8umSt@@nvE!|bUECcSkPj7O+5li9&Q{hOyMtD`a!_DF1}S$mvJ`nh3}_eP
zhZ7WW!u?}%-d2HB2RUUoUfmL4z0ZdbWip)m=%_&mo;4ulcYRD&O{@JhoE;WlS>;wG
zd@)-qfSUek5gdp;7(>k((?9q^TV=K>^X?@zdc>NQ-seT?pVp)kqEiLo|kI;}B>
zmmg7Qad}<9+vHft_Cy4JU?b>o8#^>qf3OR?uPwsKFz2
z=OsYgqQlpA8rO8EL&zkjFNzWkh9g4JW7JTUB<^CxROz&><$FGFLR%z2s4rv`AJU_+<{MA1|=AKZ$du@9c>fHWEaBy7(t@Z*ULo5{gpT2
z;E}u(+c?7C24`Oj8RZ$T)0aj}Y)%RZt+7Sc>de@#``TJyPHc=oEzY@>$iKr~bEA)e
z%_Ym;na~X6%IxldKhVZmn1v3ydz`@jVbzPCiQC=PjYQlkxR%lpz84g#
z=;f{d2CbH@I!#QASNV6HE))}D3Wi-N&;`tmY2o^2ZUJ(*luP`V*8LCz5SYe#7AqKB
z_RemvK#~WvtX@_ropXq-2qMAtD$AoYtrtxJ1lC4=fbdA%cu65v2upN=f~?9}AS*0~
zZ%l6Qe@d}@-meVAE2<=R5RVsLYoCkjV|*ZyLvH&RuqFhSMv=y<`j1Z
zhTGTP@1^s;adUZEpY*$MP!W6WO3~_Z@8N03c`^_}!$9Xe2uQ{Q&0B912y$n95bvqW
z#+}2Ernuu2nMtHr8+$%3MyW-=F9n-}d_QQ7n;1>kVxM*~s(GxAv=cs{Fvlu0Z)?Up
zD~K%N(jpQwBu|;OH3?itd^eXX1&m;`_rjL}vRe>PC&;pSVdmtq2QCx1xid=u>|K@c
zo|b#zxYv`w0?Hox&pjnr1L#-DbubuGBD_k@zM2o=@njgCkTf5G(RfJo=v3I{hzx9YNkik!m>u7
zS(}86`1FQ;mFoNsH6$%N^!
z=$e^&lTm`Z_Y+K2R)I7HmS5Zaii&fmfm=EORMghollV#viBoJZj&djn1zAK1TJGEH
zj>Lh&vfQ1u3@4Ay-aUC{n
zz<#AZDU6~|_lYb#c0)*U=~awUN$DuD)tEsXKCK~FC;%;x6A>R*?htqt&zSoKis`+X
z^N1{S91$BsrLroSDkF?+Nxk5iE`m1EgOd#EY&bQE)a8$r94#%F^Bduf$4Uio^qhtsjJCY%yGo)z*o_0R0hE1`9nugf9e$6ZiE&WogK=jBx#vZj{Xf4#ww;^A74l`A8%pd;%Es2-)wO
zL*yo^#WXXLkFr=Af97&$Oe+nLxnyxNY?B7KBPtZ}u(_zoB20!!=&E-@f%Xx&iEpg5
zDf+0@BeI+KHDw=kJO<|Q0=vbj1!_Fe>4Bt1RZ59M)vles3T2a;81$Mm+~gB9Y->5T
z71cBRNQ>o;0OnXgas
z*w|$=_O_|ZRS^(Wft^)UiM_GqvU>U*H&!X>xo@l`arIJ=bOKsQ#A>UUF;tZ(7Bjq0
zL;=&&pQdK~N#QL<-Xc(RagGQy#l)$J59#F_>UDfc+E(6Ug4mQOVdGb}#t=3qq>p_~
z2{Uw%slmaoHKTy%2+iWgg09&=dEs0&hRtS9%GCccX4!-LhSWPc&`#czxM3oE<|xiP
z6CiW{=w7D_z>7iJmUxr8R?p0;sT7=z2VHM3JO*iGWn7H4xETe&B^UWwxYlF1EtjM0
zBl6W{nN?g%qolR1ry1sG=!Oz$c;kl2Egf;$8HbVE8Je8yvL}_XW~VAk;Dq(4H$0N#G}SreZs%d^j`hPs-3h^B
zT|{kZucsKwYfc>q_C}I|T{K<;#)99_LcifY>r@31!2oy~0Ew>uY_4g|OBj?o%dO4eTf
zfHev+t~Z4`Zrblz!w#vlCv~O+GAwnRwSFd7#f&4dNb^u=Ye$>cSgccVaBMXiPT3xH
zeHyIDV$g=&9&A02r$K3PCmY`vbsMj+AC&~uL2Vn9jXeFBZoh%{gk?~aM?)qO&PA{q
zWWDtB96e{P9I+Eyk#m|IevY)>$dDa<8JjSE`;FJyS2)w@IP?r^B{on)WwfF-NuLRO
zG-7B@s*k*ijrk^1ICN-Z!4U6R!(DSU-;a;7>gLzr>q}ALMd|5-cJ&YK9$eVEa9_;%
zoDUhH&6~9X8Kh85yp)ZfjPcFS@L^dY9$choq8)?gn3BxJ?P~dw(E88_-=9W?%{5V+
zFwNmOpHKMY0!SMx3%UsmStCfx&&
z4ihaGHChlmgF&S#V`$-OhAZ~sB*p*&VPYNFeT*w1K%3SNXrQzQrY{9r);7OEz6=cs
z1IYixw&+od?alvefTz0{_qIHfa8WdhsFXVQS(H6dQ$?7u&_h6g{g9H!Wpc%|H?)Sd
zH||*flBRZnE@{iNxe#Te!`Asz2A$%Ks!8qf+j!A1JE)wMy&)njH7~-{9Js{DxLnHZ
z5w|~^sYH;NfI0*c#)4p$>fyVi9mHRj9PCuI&=}`jg8qfYnQO5SGPT8dNJd6`P}@i*
zh9%iD$waQ5u^YBMTnQU8kXa$z?3@{zEKX+&DAS*HUFC67I({t25EU#}!Uy3tSdpuv
zVk(SNnP*Ps`aIY#$sewe4i;E?Hi|&zbQ|Y%29DEH^4fPPv0q{MDjPnt^9^qKb)_C3bXaKTFavRxein98
zR@FOG*jmcz3A&gQDEx{$yqz)`Bxvu)Q|nKI!w%6_JITGu-69_r(zgS^1CEp6n0fKf
z=sL!czPrYp28T?%D^t?dFZcBM89}iM9pzkjAL7+rGvSqG@ELmyiRO$#)3m5N#s`5X
zDpIRELzCqT*K!xo3^K~vBh_)8B0-F355)#6jw|IL7t6#lUR9$E
zZ+U_8Ws;AdueOYq)P~>iTh-u}{h-j$D)o!eHF|%cg4`cYvt#we-Z0+XDU)DOX#u`2
z0MJQGVYk1Lsu;b!^J|2gHtVL!avOCE=LSQmaNEhs=nL+s#McDmYAUTDGsOH-T9FJ1OhO|i0vh|;e$@94ra3#qbv!l}vpAK|mK
zIFy|jMiDZ9Ir5r?>Osq{t5n*_)y&1GHq@~0$dipisL?TM6a&9#Sf@aofzJqFebSNs943d*pKb2t+xs#`azU~x`Z8a(6VPq>mZ-79%Iv9ba
zT$;xTe9kEjV;>kAQCQYV^Hv2NbzivNSq$jLn6HKjHx^w>NzfD&amc$2MZOxjtb$J*
z#k)X0W56!ZU97A(u9XJdGNrbrBtFjS{Ml`;q0KNdtp5
z^bI56Q5qTdZRbT4o%|vyN4LC=l&;nUW37EyNiQyrUI@beD%2U)SA}|sX1_!gv!2eE
zhN7K~6LPCTwz%gS?Av^k*5eJAmx@BWcKt
zt{F+eq}>8qT0-2kf`OWB(vu*JFbG9?B6h#U3Zd5r6r2z{{pS3*-=(xAku!4Fl?i)i
zZj6LrkEbxWb>5mAdN)hU>_e5@!un_Js}fW^{3n}U6WSIYzA@{iviZ$6zAjc%@%4{P%3=R8n>YLGQug^+eZ7xjOLG8|jGknR*J1{uFlley55
zff(QQK)Z9)RC;T`$jtxqmN#QJof0wy?=*O4Q=huUHVTzZe)Fxr
zp~*K`yMJrQUkY2~XY@=Z&g^LYMx#cqz|Q$BT`8_C&b;~}c^&PpRrRy_VhiM?HxYV@
z;uzuvnS97mTP+!muNm?xeTWm&dAylvEM$o27^~9qI^W&<0pV<4es~V1+_rk
zZ1N@!Oqy$
zJg#^(lgy$rUix-Qj6d3rC}C1-mXSC@w(zV3eS)8mw-!T3p~ujS7e7$K65B}fPtM{a
zuFf8zqHX#PK!HTmfh;I}WYJ`~bON?7+>~KmfM<~6;H{)_=)lUMyxw=KtV#05nxK@j
zW4<^6<~IgJ{Vq;zW2G|Odrmyus}VDZ?KdUNT76N%cZ}9Oquj8l6yJ#o#F=fWpnaej
z*DhMOA}UKz1jZ%O5X$JpBaUvf1#zF==rqm;z*0H0(AIX2G)QbzlOk6KUZuk2NfN>l
zKn>Lc#i<#$NJAvkswiryh9vBD3Z>XpPrSdO>~x^)L-}PtLMh@k`LHZy#XseTjeN|4
zV!qciH)2ro+zgZWjv{56nR!_wcdJ{8$K@dQQ>}A=pj9U2ZSx5FdxUlMz3NQBXkC1X
znyTYDZ+Cf@h^`F0dKxK;d1$jsp#^uF_86UuOC}kQFv0au)xvFJ#m{>mq{*QT?jWVf
zx`&0{9uq2u4Z+Mk^wf8J=QRfG8_&@V{M1|J?8hWE5>RAsX!pS~*P7ulQ5DI@+CYJ8
z=^ECz{wHFV>@XodAq_@W%wxN761q6%bbGbQayx+w3vzxf-RL+6N*G&0}M(^oQwU0@JvCvLBVK~>ej
zT>3wZH5Tmkylb@Rd-Kb~4eM70Xs=3S(fI9k=C`&9!l^M%YdSw$6rx%$BDKM%!K)N{
z`#HlX@^eT8Pas^e+hg6-57Ea&942zbWA&Vi7;*N`BFuD)ro01v^=y}O1KnCJ4wvJz
ziHAS>^F4oKQT`ALtb-0a*@sg6SQvtoJj!Ke;*@AlZ)=*p9~U<+Hn2Mk_tSDdT*kspsm*b{OmAkgNIk0&
z;f4FN2I)4))$6d@`lQPCd6pwLC)z&pE2oqhc|a*|EsSJX;ppzSnPVy2IU6~GOQ5(4
zEAh!v7Pq5;j{Sk!=`0yG5i$R&45WLQFQHxvEDV>6dvYap^GT
zplk;mFX()3uSg3|_Ri4|@0T-kxW#5RT@Wvy6|TlPkia2Bu(UDoq@0ASN;3NC?gK$W
zD*b7J5=ocmrPVS$8SR!V6Waiaa-K^`B
z;OvvIF(ypv^yG6}P>M>>!0!HG^Ru>YfDWd4O_T(Fp*^ir?Lx|fe%5REPZ|lv6PbP*
zU0%^~OG_*)rk)XLOhzJ(2KmE2X4(2k1dr8AZkZX_i75!LkGk}9zoIj!e@%@j_l|Ot
z8$sBmP*{Ln^JE-pEGXPaJA99usZbf@KZ|a92Blp?lB>Qd^}cGqhflkfJb^BRHAZQ#4X7|k~0(dr?xnf$e|4Q2vyuu)uu>Ie4{J6x*qp^P5t1?(@g
z?$AjmP3BkU(?Nr~o}8VPhB{AFn`L^zMqSBohc+8}aLnY+Ny$%%nA>Wy;pMR@+iA^z
z#x!#gBN0|^s-AgiNjGfY1W04g2R@X}BSyU|YKD42>e8IanWlf@cnB8&wuvNv@syd
zCajUE0lZf*OmH@|ZjhmDv7PJO>b*mwN|RLHN9Ox^W68RAMkpN-Ek;qDaZ^@drPh27
zIt%cFQXPYLUNGM~FlsQhZO+FlVGEI+8U!jsS0oO?O2awZhgFOnmRIta(=R~W-M_Fc
zt;gvzhik`2Yj%mLK_xS)NH8jk1l6$jLlh=7oUy;Dx|SLcV5q1g4?oTQIgen4lA3Se
ziuILNATj^w@Oj$bST@qEE!3-EdBOfQ`L^cE%G=w+lFOMy6atGls#B`xf;TWFq!-F-wooOZy}}hBiW82el&3~MU6K~I`M|&WN4p!I5WP|}
zkz~aG)@UHItS^9Fp-NU$S|l1@-%Dpdw&usCU3of3F09^McMx)V$)}?dOQVJFWfvlg
zBnL4u`~xvb#q|0(myP4T>Rr^F0(InHZNo?qyq2Orke}j>v#gp3YkFlU!V*s}#Bx)y(R~{qcnA!XX`8gmHtak6J7Y5j1s5
zdJMVi+U_GsHVwN6d_{6!)y&th+?i*#y3ouYdJ&I3G
z9W=uXGlll*%*^YzXXrg@S9j@B!7oXWaQ0O~c;1T>ycN~QT^`mu$=TnE!k9Gv)qlo-
zCIdMxTydRs=_EnaW{sp?*+UNWuBd#ZGMSdEF_Nt{l*-rP1|L1yfAyxrpGAFW%^87_
zh`YGeIl12CjWyC?70Gm0uVm4wUQi244tKtYHx8_CTD*acV9ld*o@B~l{p~^A7&Br~
zExEbjtP6otnpa2NHn%o;ov$+wvb
zCpGHHpWt@I}uN97JJUmwiF=e7@pvkT>HsYDudA_>u+m#8feUO(?EgEQc)zbTF#C^OH2yG7`(sEO)I
zG9q9LBOY%SHK_T4yp4z~RpXy|GN=g)167|DF+XF(qlXdl`X${CToJwo4JRvk%Az`r
zpqZ4y17y|+j>1XgKPjwW&+ka=FtU(<98u)P4;g=2E3s9=SZbeCz1#AlO1tSODEllX
z*v;ozh@d~yTZWWUjQZ}_)<{?Aqgo9U6=icFoxQKGo0%6UPB*7>7W!x8n@wT1fFNHx
ze%r{`_Z5iB9ckO0-62hLBZ%U59Cl+E8(Q=5Z2%)en~a=e(b-1mTq-qUp?+9-EdV9G
zN$sB2Q4CvdQ)ET%Cp6(lOt#D|^vKpsg?nYA{l3a^5O)LPo#7BtDn9b3#lbaaPBCN5
z#K1=p06Ae;a}FqYvYgCS`r2ZS5Xv^or8bVbP@#2`#A+{CJ2$pmkI9TKUzEC^@Cv
zi#dD@Wsx*Of{MXKt7+kI54xM>b+*0}JEWUAWDbd?8fj?}y6!=xcg?f*)XMTkFTA-w
zR8e1EX;EMNKKGfG*~Sw?n^q}$dDce|6VsbTna>dlUNPlK#KRS%
z5IL$YyyIv{66ZV5L;ltE1k4P*qedqfmMQ8QXE3KpNr(M6o|wJZGG<3{u_XVD@?MxY
z;apR|)Vj6p4rQmjE8_1ee#!;x^!M}I4lS`{MnTYN)sttjT6?LnP~Y5i>oOs@IgD!B
zs~81TI_VjD=m9$OuDh^zFM1t*X_^*LVWt7ZW3Bh>G@4H!A7=bE%SjJpwe_C=U~wx|
z_EH$FGW|^UtxjgOH)|ok>pbiOB?-896=t@3*{fETqwW|LSyYjsS5*g7h$Zlya@^LP
z5*4m(d<7R!=)W7e@+-PbR^kEi=yAI(~H}+ojGz;+rXj_vznF`Q7uMG
zh|b#pxherYliPZk9z>T{r(bet%l^#aO_Dw%x6>8RY#gqY!iD2=kDrX<>ZfCN{k3AM
zgkla7L2AOTAJ%G0S4jOl@0yHE5%$$Sab`Q^NEiD+Bhep(_u7Rs)U4k$q2O;Us{rbT
zT@LC_CS1`16Z;*95Wz4kD0h1QDxfg#hZ;ln4?-A_6Vhl~SyV1!o2PkC|L14pp0vh*
zHbOdWP;$Pf%7mIQPoi71@#0{Az9cmh%AurIIeR43d%3<of(Xg-oaezE9E=1^5|-$uehMvYU+b8usWOG31UfsrVqBV|
zbEDJGE;xVmsutFZQBx4GJTZ)w;BEpnqYilgSKleE&k~up|ZcuT3^6)pBAytkfIC7;@gcdI+JZ^+(HHo6KOiTie5>
z697$(XpaVVyzLfhFSjoaW!b59s1eTl0PY~x(J7BsDo9Mq)QDY~{ROW-X0RF_!nHkk
zxT_c6foZugYj#flxxzpGk}S@?j`{0a`n9&plHgCFlaghdV27gtV_DV3!(RtTtWJi#
zPQNC(3jaX=6u%2cAcF}vj$nfbIg8sA1@rWNC^Y@#v6CY#es@j)W;W&0_M$L(tcC$N
z3$0JF6Urhv;1`xxrI-wYgE1+5%Vi5RR4MD
zO()Ug{dNOiu*9|adWRL`H>v(ZN&wK?K^cme_rlL~X~?D!
zkO5vuLAkjqP#v4ZdHt_&1P$9q4!ie46yb!hPS#sCKSv>*DC(8d3
zhEcn2IQ)^1kgJvE$_}S}lO#76q(GQei^;{p)HpNO$&ZfhGxojew0t*oF^r(ySQ&C9
z+}JHb9y08~>}8hOYHvEI#)q%7#&Bs{=DCtG6d
z{W}r!VT8a7rK9oQnIs)D8#(qUcvdv*e+whsHUVn0!PVgVqQJ0Y!QRh|Xo?d7ptK;)$f;%Ei8t_FS{C!t
z+K}_h^7&r8*o3cwcqh9fR7Fbo8}avKRi(!!17=%m1$Hr>I?EP-+LagVL
z&8e!>c+s(=T0~z=CMTklL{-T&L5UZU1z%A2*{19}rdDr%aG(>kXQD6LB?3d~Xj8+G
z-*|yEA|n}3NSNs7JSlF^lLi0acU!sZ42-vgkbo4DD4(!`rk7YhRjpTOkR1fJoFsEm
zHKVnYbUp_k=b`mgRfH{FoY=uuE*21WUnkes^U?r-sD!U8*wO*wNo4`Cv2zv!o_**CQrTIF
z0rmJ)IaOU{Ahvc2@7y8U@6%YVtK&rnXo(^I_eN_!A85eg56)!t4J13i*ubnqHP#l#?)ZNNjSW8y^pA@enF`%ud
zr>igrhmVgByAKb$i@Oa6mynPU2PZcNH#gfWg3ZIv*%R!`=IlZ97sWp~WFa1w?sl%8
zb}r6Te{q5>T)aHRfWX&zs(+Hd&RWX(z6$?o;IHxD^d6p89Ez_4-q(0u0UX?%oPune
z+-%%J9RI2RI;*PsFKuU!e`fJDpB%nmR}LTOo8)Ie30da&ly*lFY%FOlO@Sb+o|6ZW~7SCTZ|A!;5=KjV1
z-_ZY+ufKiyJFmjBE|y+@8C8@O1ODZ&u$7CYot5z4mpnXR9zL*zH52Z2oB~{eY`kDz
zOEw`0Cxng5!U|%=$z^2$;j{b?3Uzn8*G>RC{-;-eQCYoG2|;+Q1^9R^*f_0(_}O?Z
zc?8%j1UQA*xCOz25GydhfS{H2-&B9CgRqo_q8N~yo%7!k4M(u2wTrvcYkS!_Te*C}5|Ht!h=)YK`+&z6<
z+#S^2)h+BHmY)B2p8pB_FD9+m-Oj_)-B0oV5!C;N6a7cQD!l5txcmKweQk)_KSux9
zk{s>+P8Ai^-`jvN*zzCg_W*lCto~kt*F64X%F-6>Yy)}SZ~xhB|0TEk9}1ZtYz>C+
zT3N91a|v;>@e1+rvRMdP@UU5Na|^sy3z(Oa^B;x$FLnYQ`ytGegEAGLPXu3fX%%f%*v+9&i;vw
z^Aj7#hku`Ka)FQk?6Cj&wgUg`LGtpS{;NC${@J5GN&lmWUtJv?ZLQ3{{m*jxPrdoy
zwEHjj|A&tLf8G9HoBjLUVh)a;z$&(MQFOQeKX(7WX!!3>kh3x|vv+p*KMnoAHu-n4
z{1-z6jQQVx0}LEsFF*dj2J=6(wD13755S}U)yaQLzW)-}zr^+5lE8nf
z@xQd|U*h_2N#MWL_+Q%f|C_iF|A)(MW)I*#cfg@u%QD^roN-uVSt;>E~@IWe3I$zp=x$F
zB-|_~o35&T&{+~1{(*r(xt2VyDx72?^uR&F{j{xw@s7nt!lhCZz3_m++P1u;L=&RW
z{l||R(`Dvv;j;w(_*B!l(Sps4{o#0e_x0y>uZ?RTMd6q{>_R#TdKu+#m4yH8SIdu-
z6s}0i!Nkt889iSNK`}74FzQgpx&snjvm`Rgh@d-#!E=Ncq!h>k2|iQ28f*{LhgCJ*
zg5opna1~IEJL|gv*y5GnSKx;00(8+--tt9u?H6_-h`e7;3%sJHyK_o)%?!KuW6eZ#
z(j{GNK|&hSn-tn$RQ3@v?pgqu5aKJnLf!OAY7_AfJB4|#!O>7~gf
zhCj4Iv5n(ziW=Hx2-kw{dH0e`+rlH>mJoZKK%Ee@hH&3rO%@;%MFfKfx~2gM^s}AN
zL+rs_+#YODT|-$~rFwiv6D_OkZFBt@UL`9+dO>%QJgXu;QIXRZ!E&;SG+=8
zqSXqtwZtU~sQNI!K#^=e12bTe^}JN|H*euD?*jRzqj2CE95M{z-wbQP8VF687w8vw
z?_CC#vQXY3DG>~CQtHp*h0VSEZ;$p&ezu4mYW}Z!0}|auNjBkk3?Ro*<|Q;u=o^$3
zl$m;^tM>dN{(MF4OQV$+qJSZshZ1Cb;vfPu;l6A{Ip`Avm)7ab0@3r>BjI}~M0y39ga0fTT&w(f!I{-|&8!ps%bz4)WnKBvn
z)P(Cz?~P$|CuNgHnk$XD^0Vwn440H-k-03E>(U4E97=d~bkl2v$SO)phJcB{11Cz4
zBbeGQ+^KCyc{}U^@3p5K^F8q7%L>!{g7FHr%eNK?3kxUzB06da&cT-t7oTGuVIUl~
zK(iIA=Fx>aZW8PDSez)`RF_(+<_*QIi6@ZlNoV5qOB`b=QeXMm0>CWc2X!!Gu||Zd
zVlM45M`bZ(BN(f&5}&
z8tNb*)V`6&PzauM>KlUT*ZcugAd9iST=%G^`cmYEf2NENEIv4kdT*CW$5yC;meO-Lp;fG5?~VVg44^mui}pS7
zY`8{(58bXS+Dc;Glc!c5?e}8N1@7bR)mDp5)nbnAN^RJAyw@1m*?jQun|?jXrG;sE
zoiLy-&>+6?8L7ADTDnWTw3=7V*`nb|0>mvvGvhi-=co0m4Pv$pwFu3x2w<~kSZH@a
zV;|-KOx63fp50PH2zr^cm}S&`Zy3r+*k9Y9s}|RIKPR5|w)yykKEhx)O!#}W-yY76
zqU9_RAJkF1RC>LIjv{t&<3MKQnW}ifKz5>L5Exo2c5hkU1rK~Yh7!03>@Y0S%v0E>
zb_}4N4Sy4`t_#ZXJeS84s#x=4uiG2zUi0$B4FRE>T*Q}!To<^CH;6ZWw72Gh!wu8j
zzNz@lP%NVd|Cr3|8lqesTi~|7COZ;iyTV6CgMMd1=#OyjzxKVs4
zSMAhi!3Ts*%r+RuWg==y?&m%CTRPc?*&h%8TD*O!nP)vhr~(Q70?Lz4twgD<0k}M0LB~yz^Zqcb@(>iFB6Cwy
zOK_LT5b9R*hHrhx-Lj3RtE=nO`OL;y+1_kKy(5*RAi8xq#``n-@^f|mdsE&L4UBM(
z5Xd`Adzt>|;-5Z|{4L;3r&6WkE^bIjHAH`=+*~hQR9g}9^8{-lNAvBaFyk3`JIO06
zPfZ2{K+-pD=F~gyE2vlL4s%QhY^MwR`925?Wmh>5kU;8rHF-y+!EX4WMz4a1G=hE1
z+zSq$Bk}du+TG8uvIM=(<~e`X)v-U{Z}??5yPYmqDW9dIdPB<%l^6h)*HssE5!e%p
z|BJh#$%gxV|Wde_{bWh#Ep)$QmkW2V*IoQ!tY`4$BHV-tXo^
zI?&3c^w>484W_V}XbWn{&v@)-y7TU5IG2=P>zqHM-hbPVP=7+PBE5dMA@uI#e3*(?
z6dFD(&Q=f=6|LXfaclRHO=el+bKV=R>VCPB6){i&&w!~*3$yYpXJd&E2PkyFC0_0K
zHjiJjnclS9&cj~=%bqsg29K+|v(We8&?RD}<7sNWAHPZdyfPUrQd6(uC1`SpbclH(
zZ1Iwec~lgBb+Oz1lPH%h)VVZ@X4t&KT%7Su=6pivdDQLob^%T($)CrcQAJizV(QBg
zd^}+K4pBSsCoK{KNCVPRXX<`6Ih4t?KP5ouM@zx8zOT7&$g8WX_wMT{DfNWWX8m^G
z@EJ;C*6C40N}umB!pdQ54;q3&(?=rfgy^v<&QyPXxt>`0es{9;HF
z?gMNf99f_jP=z$}BettJ!se&QqmF~P$hd{i_h}QdTa*ibZhgyNON+DqWV)|QOG-*k
z)2x?keB2vLmx}T1X7=aFvVM_z_rZC4#sm$K^5i}}?q^T@9vzii%rMp3o0k2!JAh}v
zDZH=2wi*V&x1m@NfX>>CRpIL(OJD?zaiQ$uy1cW~-d9avfV)h81;DtRuL~Y7#3UvT
z0F&u)*18{w=KDZK%!YB5hYg|CjabD;0{#^US1ybD(QRjMkH%XefV>8fBj{quB73X5
z8|p>mADsox==#=Eqc4joqp*6=RV{8Wu&uRGR*GQ8Atj04FZ+_gjML}LW~$yDqVk~$!_Ifu3Y0FFsNHB
znKbwQSiKh}nBwYc9BG%2iNJw9r$iNwgsM#T|^HHAjjbfX1BhMTJuXfjem^ovQsrjUsYvsfdxKMV;kXFNiDzd87I
zb2uZW=c6Gde(m#WbYbS?q|MOLyDj1cRgHgKYO@OaOssJ=Hi%PVAFgowhRH=1$>ep!?a=$)iFI?orVrI7
zJDol}!)cI!qV(uK`?JCPLhrF6B&+G{udc1WkmjY1y83#078cYFd8j*wAD7fJ<_dkk
z89i|G)pVSx25dLEH#KC;qoiv!5kbab@7!{N=d<&si>Ir^x;Z?nj!?t1<&?w6w|{Sg
z%Usb&1$g_JWc7cDI7JJ#8&05t#@jZlxwzAr>940;Gl7k)-h7M@kach#{wcD&8_0MW
zLdN_KA_+