From fdedd5e67e7389fc9984046d1337aff09d0d461d Mon Sep 17 00:00:00 2001 From: Alec Embke Date: Sat, 30 Nov 2024 15:14:00 -0800 Subject: [PATCH] 10.0.0 (#314) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: refactor routing task for perf improvements * feat: use max_command_queue_len for backpressure * feat: remove auto_pipeline config * feat: add i-hexpire feature flag * feat: rename types * fix: enable-rustls-ring in process_stream * feat: add scan_page interface * feat: make on_* event functions async * feat: withscore arg for ZRANK and ZREVRANK * feat: cancel scan streams * feat: support replicas with options, expire options * feat: make specialize-into-bytes ff default --------- Co-authored-by: 埃拉 Co-authored-by: ArtemIsmagilov --- CHANGELOG.md | 48 + CONTRIBUTING.md | 25 +- Cargo.toml | 15 +- README.md | 131 +- bin/benchmark/.gitignore | 5 +- bin/benchmark/Cargo.toml | 43 +- bin/benchmark/README.md | 73 +- bin/benchmark/cli.yml | 15 +- bin/benchmark/run.sh | 11 +- bin/benchmark/src/_fred.rs | 25 +- bin/benchmark/src/_redis.rs | 111 +- bin/benchmark/src/main.rs | 131 +- bin/benchmark/valgrind.sh | 8 + bin/inf_loop/Cargo.toml | 5 +- bin/inf_loop/docker-compose.yml | 2 +- bin/inf_loop/src/main.rs | 38 +- bin/replica_consistency/src/main.rs | 7 +- examples/axum.rs | 11 +- examples/basic.rs | 27 +- examples/blocking.rs | 6 +- examples/client_tracking.rs | 11 +- examples/custom.rs | 4 +- examples/dns.rs | 4 +- examples/events.rs | 8 +- examples/glommio.rs | 8 +- examples/keyspace.rs | 18 +- examples/lua.rs | 18 +- examples/misc.rs | 26 +- examples/monitor.rs | 8 +- examples/pipeline.rs | 10 +- examples/pool.rs | 4 +- examples/pubsub.rs | 10 +- examples/redis_json.rs | 2 +- examples/replicas.rs | 22 +- examples/scan.rs | 84 +- examples/sentinel.rs | 6 +- examples/serde_json.rs | 4 +- examples/streams.rs | 6 +- examples/tls.rs | 8 +- examples/transactions.rs | 10 +- src/clients/{redis.rs => client.rs} | 173 +- 
src/clients/mod.rs | 6 +- src/clients/options.rs | 34 +- src/clients/pipeline.rs | 71 +- src/clients/pool.rs | 188 +- src/clients/pubsub.rs | 56 +- src/clients/replica.rs | 95 +- src/clients/sentinel.rs | 20 +- src/clients/transaction.rs | 115 +- src/commands/impls/acl.rs | 38 +- src/commands/impls/client.rs | 44 +- src/commands/impls/cluster.rs | 84 +- src/commands/impls/config.rs | 10 +- src/commands/impls/geo.rs | 78 +- src/commands/impls/hashes.rs | 158 +- src/commands/impls/hyperloglog.rs | 24 +- src/commands/impls/keys.rs | 252 +- src/commands/impls/lists.rs | 180 +- src/commands/impls/lua.rs | 141 +- src/commands/impls/memory.rs | 14 +- src/commands/impls/mod.rs | 71 +- src/commands/impls/pubsub.rs | 71 +- src/commands/impls/redis_json.rs | 189 +- src/commands/impls/redisearch.rs | 186 +- src/commands/impls/scan.rs | 217 +- src/commands/impls/sentinel.rs | 79 +- src/commands/impls/server.rs | 121 +- src/commands/impls/sets.rs | 99 +- src/commands/impls/slowlog.rs | 14 +- src/commands/impls/sorted_sets.rs | 255 +- src/commands/impls/streams.rs | 142 +- src/commands/impls/timeseries.rs | 151 +- src/commands/impls/tracking.rs | 52 +- src/commands/interfaces/acl.rs | 52 +- src/commands/interfaces/client.rs | 92 +- src/commands/interfaces/cluster.rs | 77 +- src/commands/interfaces/config.rs | 20 +- src/commands/interfaces/geo.rs | 94 +- src/commands/interfaces/hashes.rs | 195 +- src/commands/interfaces/hyperloglog.rs | 24 +- src/commands/interfaces/keys.rs | 298 +-- src/commands/interfaces/lists.rs | 192 +- src/commands/interfaces/lua.rs | 108 +- src/commands/interfaces/memory.rs | 26 +- src/commands/interfaces/pubsub.rs | 54 +- src/commands/interfaces/redis_json.rs | 136 +- src/commands/interfaces/redisearch.rs | 148 +- src/commands/interfaces/sentinel.rs | 86 +- src/commands/interfaces/server.rs | 39 +- src/commands/interfaces/sets.rs | 106 +- src/commands/interfaces/slowlog.rs | 14 +- src/commands/interfaces/sorted_sets.rs | 270 ++- 
src/commands/interfaces/streams.rs | 254 +- src/commands/interfaces/timeseries.rs | 172 +- src/commands/interfaces/tracking.rs | 12 +- src/error.rs | 221 +- src/interfaces.rs | 129 +- src/lib.rs | 54 +- src/macros.rs | 2 +- src/modules/backchannel.rs | 177 +- src/modules/inner.rs | 258 ++- src/modules/mocks.rs | 92 +- src/modules/response.rs | 652 +++--- src/monitor/mod.rs | 16 +- src/monitor/parser.rs | 32 +- src/monitor/utils.rs | 82 +- src/protocol/cluster.rs | 238 +- src/protocol/codec.rs | 51 +- src/protocol/command.rs | 2174 ++++++++---------- src/protocol/connection.rs | 807 +++---- src/protocol/debug.rs | 61 +- src/protocol/hashers.rs | 36 +- src/protocol/public.rs | 210 -- src/protocol/responders.rs | 198 +- src/protocol/tls.rs | 25 +- src/protocol/types.rs | 119 +- src/protocol/utils.rs | 376 ++- src/router/centralized.rs | 188 +- src/router/clustered.rs | 761 +++--- src/router/commands.rs | 676 +++--- src/router/connections.rs | 389 ++++ src/router/mod.rs | 1106 +++------ src/router/replicas.rs | 305 +-- src/router/responses.rs | 97 +- src/router/sentinel.rs | 161 +- src/router/transactions.rs | 665 ++---- src/router/types.rs | 158 +- src/router/utils.rs | 699 +++--- src/runtime/_tokio.rs | 238 +- src/runtime/glommio/broadcast.rs | 6 +- src/runtime/glommio/interfaces.rs | 107 +- src/runtime/glommio/io_compat.rs | 26 +- src/runtime/glommio/mod.rs | 14 +- src/runtime/glommio/mpsc.rs | 66 +- src/trace/README.md | 1 - src/trace/disabled.rs | 10 +- src/trace/enabled.rs | 24 +- src/types/args.rs | 1024 ++++----- src/types/builder.rs | 60 +- src/types/client.rs | 32 +- src/types/cluster.rs | 18 +- src/types/{misc.rs => common.rs} | 289 +-- src/types/config.rs | 397 ++-- src/types/from_tuple.rs | 8 +- src/types/geo.rs | 29 +- src/types/mod.rs | 82 +- src/types/multiple.rs | 24 +- src/types/redisearch.rs | 17 +- src/types/scan.rs | 121 +- src/types/scripts.rs | 158 +- src/types/sorted_sets.rs | 73 +- src/types/streams.rs | 100 +- src/types/timeseries.rs | 51 +- 
src/utils.rs | 341 ++- tests/README.md | 9 +- tests/docker/compose/base.yml | 4 +- tests/docker/runners/bash/all-features.sh | 2 +- tests/docker/runners/bash/redis-stack.sh | 2 +- tests/docker/runners/images/debug.dockerfile | 5 +- tests/integration/acl/mod.rs | 22 +- tests/integration/centralized.rs | 11 +- tests/integration/client/mod.rs | 2 +- tests/integration/cluster/mod.rs | 6 +- tests/integration/clustered.rs | 16 +- tests/integration/geo/mod.rs | 45 +- tests/integration/hashes/mod.rs | 62 +- tests/integration/hyperloglog/mod.rs | 8 +- tests/integration/keys/mod.rs | 157 +- tests/integration/lists/mod.rs | 117 +- tests/integration/lua/mod.rs | 116 +- tests/integration/memory/mod.rs | 18 +- tests/integration/multi/mod.rs | 28 +- tests/integration/other/mod.rs | 405 ++-- tests/integration/pool/mod.rs | 44 +- tests/integration/pubsub/mod.rs | 38 +- tests/integration/redis_json/mod.rs | 64 +- tests/integration/redisearch/mod.rs | 81 +- tests/integration/scanning/mod.rs | 114 +- tests/integration/server/mod.rs | 39 +- tests/integration/sets/mod.rs | 98 +- tests/integration/slowlog/mod.rs | 8 +- tests/integration/sorted_sets/mod.rs | 185 +- tests/integration/streams/mod.rs | 174 +- tests/integration/timeseries/mod.rs | 68 +- tests/integration/tracking/mod.rs | 35 +- tests/integration/utils.rs | 363 ++- tests/runners/docker-bash.sh | 4 +- 187 files changed, 11190 insertions(+), 12596 deletions(-) create mode 100755 bin/benchmark/valgrind.sh rename src/clients/{redis.rs => client.rs} (74%) delete mode 100644 src/protocol/public.rs create mode 100644 src/router/connections.rs rename src/types/{misc.rs => common.rs} (79%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7fcf37c9..31176c19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,51 @@ +## 10.0.0 + +* Reduced memory footprint and significant write throughput improvements +* Rename common interfaces to remove `Redis` prefixes +* Add `WITHSCORES` to `ZRANK` and `ZREVRANK` +* Add `GT|LT|NX|XX` options to 
`EXPIRE` and `EXPIREAT` +* Add `scan_page` interface +* Add optional message to `PING` +* Remove deprecated or redundant config options +* Refactor public types into submodules +* Add `i-hexpire` feature flag +* Support async blocks in `on_*` event handler functions +* Add an interface to cancel scanning functions +* Update `rustls-native-certs` to 0.8 +* Support `valkey://` scheme in `Config::from_url`. +* Support combining `Options` and `Replicas` clients + +### Upgrading from 9.x + +This update contains some significant performance improvements in the form of reduced Tokio scheduling overhead and +reduced memory usage during the frame encoding process. It also contains several cosmetic changes designed to support +future scenarios where Valkey and Redis start to diverge from one another. + +### Notable Breaking Changes + +The compiler should guide callers through most of these changes. + +* The `auto_pipeline` config option was removed. All clients now automatically pipeline commands across Tokio tasks. +* The `BackpressureConfig` struct was removed. Callers should use `max_command_buffer_len` instead. +* The `HEXPIRE`, `HTTL`, etc, commands are now gated by an `i-hexpire` feature flag. Note that this requires Redis > + =7.2.5. +* Some potentially redundant `ReplicaConfig` fields were removed. The client now uses the associated `ReconnectPolicy` + fields instead, where applicable. +* The `types` module was becoming too large and needed refactoring. Many types were moved to submodules, which likely + requires changes to certain import statements. +* Many of the common public types were renamed to remove the `Redis` prefix, such as `RedisConfig`, `RedisClient`, + `RedisPool`, etc. +* `rustls-native-certs` was upgraded to 8.x. +* The `specialize-into-bytes` feature flag was removed. This is now the default behavior. +* The `on_error` and `error_rx` event handler interface now contains an optional server identifier. 
+ +### Behavior Changes + +* In the past `fred` spawned a separate task per connection in order to read from all sockets concurrently. In 10.0.0 + each client reads and writes to all connections in a single task. +* Write throughput is improved by a factor of 3-5x depending on the use case. +* All transactions are now pipelined automatically. + ## 9.4.0 * Change scanning functions to automatically continue when the current page is dropped diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 91961a19..e48f2a05 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,8 +5,8 @@ This document gives some background on how the library is structured and how to ## General * Run rustfmt and clippy before submitting any changes. - * Formatting should adhere to [rustfmt.toml](./rustfmt.toml), i.e. by running `cargo +nightly fmt --all` - * VS Code users should use the checked-in settings in the [.vscode](./.vscode) directory + * Formatting should adhere to [rustfmt.toml](./rustfmt.toml), i.e. by running `cargo +nightly fmt --all` + * VS Code users should use the checked-in settings in the [.vscode](./.vscode) directory * Please use [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#summary). ## File Structure @@ -43,7 +43,7 @@ The code has the following structure: This example shows how to add `MGET` to the commands. -1. Add the new variant to the `RedisCommandKind` enum, if needed. +1. Add the new variant to the `CommandKind` enum, if needed. ```rust pub enum RedisCommandKind { @@ -80,7 +80,7 @@ This example shows how to add `MGET` to the commands. 2. Create the private function implementing the command in [src/commands/impls/keys.rs](src/commands/impls/keys.rs). ```rust - pub async fn mget(client: &C, keys: MultipleKeys) -> Result { + pub async fn mget(client: &C, keys: MultipleKeys) -> Result { // maybe do some kind of validation utils::check_empty_keys(&keys)?; @@ -94,10 +94,10 @@ This example shows how to add `MGET` to the commands. 
} ``` - Or use one of the shorthand helper functions or macros. + Or use one of the shorthand helper functions or macros. ```rust - pub async fn mget(client: &C, keys: MultipleKeys) -> Result { + pub async fn mget(client: &C, keys: MultipleKeys) -> Result { utils::check_empty_keys(&keys)?; args_values_cmd(client, keys.into_values()).await } @@ -133,13 +133,13 @@ This example shows how to add `MGET` to the commands. 4. Implement the interface on the client structs, if needed. - In the [RedisClient](src/clients/redis.rs) file. + In the [Client](src/clients/redis.rs) file. ```rust - impl KeysInterface for RedisClient {} + impl KeysInterface for Client {} ``` - In the [transaction](src/clients/transaction.rs) file. + In the [transaction](src/clients/transaction.rs) file. ```rust impl KeysInterface for Transaction {} @@ -155,8 +155,8 @@ Using `MGET` as an example again: 1. Write tests in the [keys](tests/integration/keys/mod.rs) file. ```rust - pub async fn should_mget_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let expected: Vec<(&str, RedisValue)> = vec![("a{1}", 1.into()), ("b{1}", 2.into()), ("c{1}", 3.into())]; + pub async fn should_mget_values(client: Client, _: Config) -> Result<(), RedisError> { + let expected: Vec<(&str, Value)> = vec![("a{1}", 1.into()), ("b{1}", 2.into()), ("c{1}", 3.into())]; for (key, value) in expected.iter() { let _: () = client.set(key, value.clone(), None, None, false).await?; } @@ -186,8 +186,7 @@ Using `MGET` as an example again: } ``` -These macros will generate test wrapper functions to call your test 8 times based on the following options: +These macros will generate test wrapper functions to call your test 4 times based on the following options: * Clustered vs centralized deployments -* Pipelined vs non-pipelined clients * RESP2 vs RESP3 protocol modes diff --git a/Cargo.toml b/Cargo.toml index ac9c01a9..03892aeb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,13 +11,14 @@ name = "fred" readme = 
"README.md" repository = "https://github.com/aembke/fred.rs" rust-version = "1.75" -version = "9.4.0" +version = "10.0.0" [package.metadata.docs.rs] # do not show the glommio version of the docs features = [ "i-all", "i-redis-stack", + "i-hexpire", "transactions", "blocking-encoding", "dns", @@ -34,7 +35,6 @@ features = [ "enable-native-tls", "full-tracing", "credential-provider", - "specialize-into-bytes" ] rustdoc-args = ["--cfg", "docsrs"] @@ -46,7 +46,6 @@ test = true [features] default = ["transactions", "i-std"] -specialize-into-bytes = [] blocking-encoding = ["tokio/rt-multi-thread"] custom-reconnect-errors = [] default-nil-types = [] @@ -156,6 +155,7 @@ i-redis-json = ["serde-json"] i-redis-stack = ["i-redis-json", "i-time-series", "i-redisearch"] i-redisearch = ["i-sorted-sets", "i-geo", "i-hashes"] i-time-series = [] +i-hexpire = [] # Full and partial tracing full-tracing = ["partial-tracing"] @@ -170,17 +170,16 @@ arc-swap = "1.7" async-trait = { version = "0.1" } bytes = "1.6" bytes-utils = "0.1.3" -crossbeam-queue = "0.3" -float-cmp = "0.9" +float-cmp = "0.10" futures = { version = "0.3", features = ["std"] } log = "0.4" native-tls = { version = "0.2", optional = true } nom = { version = "7.1", optional = true } parking_lot = "0.12" rand = "0.8" -redis-protocol = { version = "5.0.1", features = ["resp2", "resp3", "bytes"] } +redis-protocol = { version = "6.0.0", features = ["resp2", "resp3", "bytes"] } rustls = { version = "0.23", optional = true, default-features = false } -rustls-native-certs = { version = "0.7", optional = true } +rustls-native-certs = { version = "0.8", optional = true } semver = "1.0" serde_json = { version = "1", optional = true } sha-1 = { version = "0.10", optional = true } @@ -220,11 +219,9 @@ local-sync = { version = "0.1.1", optional = true } [dev-dependencies] axum = { version = "0.7", features = ["macros"] } -base64 = "0.22.0" maplit = "1.0" pretty_env_logger = "0.5" serde = { version = "1.0", features = ["derive"] } 
-subprocess = "0.2" tokio-stream = { version = "0.1", features = ["sync"] } [[example]] diff --git a/README.md b/README.md index 11e73685..680829b2 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ Fred [![Crates.io](https://img.shields.io/crates/v/fred.svg)](https://crates.io/crates/fred) [![API docs](https://docs.rs/fred/badge.svg)](https://docs.rs/fred) -An async client for Redis and Valkey +An async client for Valkey and Redis ## Example @@ -15,10 +15,24 @@ An async client for Redis and Valkey use fred::prelude::*; #[tokio::main] -async fn main() -> Result<(), RedisError> { - let client = RedisClient::default(); +async fn main() -> Result<(), Error> { + let config = Config::from_url("redis://localhost:6379/1")?; + let client = Builder::from_config(config) + .with_connection_config(|config| { + config.connection_timeout = Duration::from_secs(5); + config.tcp = TcpConfig { + nodelay: Some(true), + ..Default::default() + }; + }) + .build()?; client.init().await?; + client.on_error(|(error, server)| async move { + println!("{:?}: Connection error: {:?}", server, error); + Ok(()) + }); + // convert responses to many common Rust types let foo: Option = client.get("foo").await?; assert!(foo.is_none()); @@ -58,69 +72,76 @@ See the build features for more information. ## Client Features -| Name | Default | Description | -|---------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `transactions` | x | Enable a [Transaction](https://redis.io/docs/interact/transactions/) interface. | -| `enable-native-tls` | | Enable TLS support via [native-tls](https://crates.io/crates/native-tls). | -| `enable-rustls` | | Enable TLS support via [rustls](https://crates.io/crates/rustls) with the default crypto backend features. 
| -| `enable-rustls-ring` | | Enable TLS support via [rustls](https://crates.io/crates/rustls) and the ring crypto backend. | -| `vendored-openssl` | | Enable the `native-tls/vendored` feature. | -| `metrics` | | Enable the metrics interface to track overall latency, network latency, and request/response sizes. | -| `full-tracing` | | Enable full [tracing](./src/trace/README.md) support. This can emit a lot of data. | -| `partial-tracing` | | Enable partial [tracing](./src/trace/README.md) support, only emitting traces for top level commands and network latency. | -| `blocking-encoding` | | Use a blocking task for encoding or decoding frames. This can be useful for clients that send or receive large payloads, but requires a multi-thread Tokio runtime. | -| `custom-reconnect-errors` | | Enable an interface for callers to customize the types of errors that should automatically trigger reconnection logic. | -| `monitor` | | Enable an interface for running the `MONITOR` command. | -| `sentinel-client` | | Enable an interface for communicating directly with Sentinel nodes. This is not necessary to use normal Redis clients behind a sentinel layer. | -| `sentinel-auth` | | Enable an interface for using different authentication credentials to sentinel nodes. | -| `subscriber-client` | | Enable a subscriber client interface that manages channel subscription state for callers. | -| `serde-json` | | Enable an interface to automatically convert Redis types to JSON via `serde-json`. | -| `mocks` | | Enable a mocking layer interface that can be used to intercept and process commands in tests. | -| `dns` | | Enable an interface that allows callers to override the DNS lookup logic. | -| `replicas` | | Enable an interface that routes commands to replica nodes. | -| `default-nil-types` | | Enable a looser parsing interface for `nil` values. | -| `sha-1` | | Enable an interface for hashing Lua scripts. | -| `unix-sockets` | | Enable Unix socket support. 
| -| `credential-provider` | | Enable an interface that can dynamically load auth credentials at runtime. | -| `specialize-into-bytes` | | Specialize `TryFrom for RedisValue>` to use `RedisValue::Bytes`, disabling `From for RedisValue`. This is a temporary feature flag that will be made the default in the next major version. | -| `glommio` | | Enable experimental [Glommio](https://github.com/DataDog/glommio) support. | +| Name | Default | Description | +|---------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `transactions` | x | Enable a [Transaction](https://redis.io/docs/interact/transactions/) interface. | +| `enable-native-tls` | | Enable TLS support via [native-tls](https://crates.io/crates/native-tls). | +| `enable-rustls` | | Enable TLS support via [rustls](https://crates.io/crates/rustls) with the default crypto backend features. | +| `enable-rustls-ring` | | Enable TLS support via [rustls](https://crates.io/crates/rustls) and the ring crypto backend. | +| `vendored-openssl` | | Enable the `native-tls/vendored` feature. | +| `metrics` | | Enable the metrics interface to track overall latency, network latency, and request/response sizes. | +| `full-tracing` | | Enable full [tracing](./src/trace/README.md) support. This can emit a lot of data. | +| `partial-tracing` | | Enable partial [tracing](./src/trace/README.md) support, only emitting traces for top level commands and network latency. | +| `blocking-encoding` | | Use a blocking task for encoding or decoding frames. This can be useful for clients that send or receive large payloads, but requires a multi-thread Tokio runtime. | +| `custom-reconnect-errors` | | Enable an interface for callers to customize the types of errors that should automatically trigger reconnection logic. | +| `monitor` | | Enable an interface for running the `MONITOR` command. 
| +| `sentinel-client` | | Enable an interface for communicating directly with Sentinel nodes. This is not necessary to use normal Redis clients behind a sentinel layer. | +| `sentinel-auth` | | Enable an interface for using different authentication credentials to sentinel nodes. | +| `subscriber-client` | | Enable a subscriber client interface that manages channel subscription state for callers. | +| `serde-json` | | Enable an interface to automatically convert Redis types to JSON via `serde-json`. | +| `mocks` | | Enable a mocking layer interface that can be used to intercept and process commands in tests. | +| `dns` | | Enable an interface that allows callers to override the DNS lookup logic. | +| `replicas` | | Enable an interface that routes commands to replica nodes. | +| `default-nil-types` | | Enable a looser parsing interface for `nil` values. | +| `sha-1` | | Enable an interface for hashing Lua scripts. | +| `unix-sockets` | | Enable Unix socket support. | +| `credential-provider` | | Enable an interface that can dynamically load auth credentials at runtime. | +| `glommio` | | Enable experimental [Glommio](https://github.com/DataDog/glommio) support. | ## Interface Features The command interfaces have many functions and compile times can add up quickly. Interface features begin with `i-` and control which public interfaces are built. -| Name | Default | Description | -|-----------------|---------|---------------------------------------------------------------------------------------------------------------------------| -| `i-all` | | Enable the interfaces included with a basic Redis or Valkey installation. This does not include `i-redis-stack` features. | -| `i-std` | x | Enable the common data structure interfaces (lists, sets, streams, keys, etc). | -| `i-acl` | | Enable the ACL command interface. | -| `i-client` | | Enable the CLIENT command interface. | -| `i-cluster` | | Enable the CLUSTER command interface. 
| -| `i-config` | | Enable the CONFIG command interface. | -| `i-geo` | | Enable the GEO command interface. | -| `i-hashes` | | Enable the hashes (HGET, etc) command interface. | -| `i-hyperloglog` | | Enable the hyperloglog command interface. | -| `i-keys` | | Enable the main keys (GET, SET, etc) command interface. | -| `i-lists` | | Enable the lists (LPUSH, etc) command interface. | -| `i-scripts` | | Enable the scripting command interfaces. | -| `i-memory` | | Enable the MEMORY command interfaces. | -| `i-pubsub` | | Enable the publish-subscribe command interfaces. | -| `i-server` | | Enable the server control (SHUTDOWN, BGSAVE, etc) interfaces. | -| `i-sets` | | Enable the sets (SADD, etc) interface. | -| `i-sorted-sets` | | Enable the sorted sets (ZADD, etc) interface. | -| `i-slowlog` | | Enable the SLOWLOG interface. | -| `i-streams` | | Enable the streams (XADD, etc) interface. | -| `i-tracking` | | Enable a [client tracking](https://redis.io/docs/manual/client-side-caching/) interface. | -| `i-time-series` | | Enable a [Redis Timeseries](https://redis.io/docs/data-types/timeseries/) interface. | -| `i-redis-json` | | Enable a [RedisJSON](https://github.com/RedisJSON/RedisJSON) interface. | -| `i-redisearch` | | Enable a [RediSearch](https://github.com/RediSearch/RediSearch) interface. | -| `i-redis-stack` | | Enable the [Redis Stack](https://github.com/redis-stack) interfaces (`i-redis-json`, `i-time-series`, etc). | +| Name | Default | Description | +|-----------------|---------|------------------------------------------------------------------------------------------| +| `i-all` | | Enable the interfaces described in this table. | +| `i-std` | x | Enable the common data structure interfaces (lists, sets, streams, keys, etc). | +| `i-acl` | | Enable the ACL command interface. | +| `i-client` | | Enable the CLIENT command interface. | +| `i-cluster` | | Enable the CLUSTER command interface. | +| `i-config` | | Enable the CONFIG command interface. 
| +| `i-geo` | | Enable the GEO command interface. | +| `i-hashes` | | Enable the hashes (HGET, etc) command interface. | +| `i-hyperloglog` | | Enable the hyperloglog command interface. | +| `i-keys` | | Enable the main keys (GET, SET, etc) command interface. | +| `i-lists` | | Enable the lists (LPUSH, etc) command interface. | +| `i-scripts` | | Enable the scripting command interfaces. | +| `i-memory` | | Enable the MEMORY command interfaces. | +| `i-pubsub` | | Enable the publish-subscribe command interfaces. | +| `i-server` | | Enable the server control (SHUTDOWN, BGSAVE, etc) interfaces. | +| `i-sets` | | Enable the sets (SADD, etc) interface. | +| `i-sorted-sets` | | Enable the sorted sets (ZADD, etc) interface. | +| `i-slowlog` | | Enable the SLOWLOG interface. | +| `i-streams` | | Enable the streams (XADD, etc) interface. | +| `i-tracking` | | Enable a [client tracking](https://redis.io/docs/manual/client-side-caching/) interface. | If a specific high level command function is not supported callers can use the `custom` function as a workaround until the higher level interface is added. See the [custom](https://github.com/aembke/fred.rs/blob/main/examples/custom.rs) example for more info. +### Redis Features + +Features currently specific to Redis, typically versions >=7.2.5: + +| Name | Default | Description | +|-----------------|---------|-------------------------------------------------------------------------------------------------------------| +| `i-time-series` | | Enable a [Redis Timeseries](https://redis.io/docs/data-types/timeseries/) interface. | +| `i-redis-json` | | Enable a [RedisJSON](https://github.com/RedisJSON/RedisJSON) interface. | +| `i-redisearch` | | Enable a [RediSearch](https://github.com/RediSearch/RediSearch) interface. | +| `i-redis-stack` | | Enable the [Redis Stack](https://github.com/redis-stack) interfaces (`i-redis-json`, `i-time-series`, etc). 
| +| `i-hexpire` | | Enable the hashmap expiration interface (`HEXPIRE`, `HTTL`, etc). | + ## Debugging Features | Name | Default | Description | diff --git a/bin/benchmark/.gitignore b/bin/benchmark/.gitignore index 01916099..534db70f 100644 --- a/bin/benchmark/.gitignore +++ b/bin/benchmark/.gitignore @@ -2,4 +2,7 @@ target Cargo.lock perf* -server.log \ No newline at end of file +server.log +callgrind* +flamegraph* +output.svg \ No newline at end of file diff --git a/bin/benchmark/Cargo.toml b/bin/benchmark/Cargo.toml index e6a4d531..e3f4c1ba 100644 --- a/bin/benchmark/Cargo.toml +++ b/bin/benchmark/Cargo.toml @@ -8,15 +8,22 @@ description = "A benchmarking script based on the `redis-benchmark` tool include [profile.release] debug = true +[target.x86_64-unknown-linux-gnu] +linker = "/usr/bin/clang" +rustflags = ["-Clink-arg=-fuse-ld=lld", "-Clink-arg=-Wl,--no-rosegment"] + +[build] +rustflags = ["--cfg", "tokio_unstable"] + [dependencies] clap = { version = "2.33", features = ["yaml"] } -opentelemetry = { version = "0.18.0", features = ["rt-tokio", "trace"] } -opentelemetry-jaeger = { version = "0.17.0", features = ["tokio", "isahc_collector_client", "isahc", "collector_client", "rt-tokio"] } -tracing-attributes = "0.1.23" -tracing-opentelemetry = "0.18.0" -tracing-core = "0.1.30" -tracing-subscriber = "0.3.16" -tracing = "0.1.37" +opentelemetry = { version = "0.18.0", optional = true, features = ["rt-tokio", "trace"] } +opentelemetry-jaeger = { version = "0.17.0", optional = true, features = ["tokio", "isahc_collector_client", "isahc", "collector_client", "rt-tokio"] } +tracing-attributes = { version = "0.1.23", optional = true } +tracing-opentelemetry = { version = "0.18.0", optional = true } +tracing-core = { version = "0.1.30", optional = true } +tracing-subscriber = { version = "0.3.16", optional = true } +tracing = { version = "0.1.37", optional = true } log = "0.4" pretty_env_logger = "0.5" tokio = { version = "1", features = ["full"] } @@ -24,21 +31,31 
@@ futures = "0.3" rand = "0.8" indicatif = "=0.17.1" bb8-redis = { version = "0.17.0", optional = true } +redis = { version = "0.27.5", optional = true, features = ["connection-manager", "tokio-comp", "cluster-async"] } +console-subscriber = { version = "0.4.1", optional = true } +tokio-metrics = { version = "0.3.1", optional = true, features = ["rt"] } +dhat = { version = "0.3.3", optional = true } [dependencies.fred] -#path = "../.." -path = "/fred" +path = "../.." +#path = "/fred" +#path = "/project" features = ["replicas", "unix-sockets", "i-all"] default-features = false [features] default = [] +dhat-heap = ["dep:dhat"] +metrics = ["tokio-metrics"] +console = ["console-subscriber"] assert-expected = [] +tracing-deps = ["tracing", "tracing-subscriber", "tracing-core", "tracing-opentelemetry", "tracing-attributes", "opentelemetry", "opentelemetry-jaeger"] redis-rs = ["bb8-redis"] enable-rustls = ["fred/enable-rustls"] enable-native-tls = ["fred/enable-native-tls"] debug-ids = ["fred/debug-ids"] -stdout-tracing = ["fred/partial-tracing"] -partial-tracing = ["fred/partial-tracing"] -full-tracing = ["fred/full-tracing"] -blocking-encoding = ["fred/blocking-encoding"] \ No newline at end of file +stdout-tracing = ["fred/partial-tracing", "tracing-deps"] +partial-tracing = ["fred/partial-tracing", "tracing-deps"] +full-tracing = ["fred/full-tracing", "tracing-deps"] +blocking-encoding = ["fred/blocking-encoding"] +redis-manager = ["dep:redis"] \ No newline at end of file diff --git a/bin/benchmark/README.md b/bin/benchmark/README.md index eb4e137a..8a0e4ff3 100644 --- a/bin/benchmark/README.md +++ b/bin/benchmark/README.md @@ -22,8 +22,6 @@ concurrency (`-c`) and pool size (`-P`) argv. There are several additional features or performance tuning options that can affect these results. For example: * Tracing. Enabling the FF cut throughput by ~20% in my tests. -* Pipelining. 
The `auto_pipeline` feature can dramatically improve throughput in scenarios like this where a client or - pool is shared among many Tokio tasks. The original purpose of this tool was to test this particular optimization. * Clustering * Backpressure settings * Network latency @@ -63,8 +61,10 @@ Callers may have to also change `run.sh` to enable additional features in docker ## Usage ``` +A benchmarking module based on the `redis-benchmark` tool included with Redis. + USAGE: - fred_benchmark [FLAGS] [OPTIONS] [SUBCOMMAND] + fred_benchmark [FLAGS] [OPTIONS] FLAGS: --cluster Whether to assume a clustered deployment. @@ -72,23 +72,19 @@ FLAGS: -q, --quiet Only print the final req/sec measurement. --replicas Whether to use `GET` with replica nodes instead of `INCR` with primary nodes. -t, --tls Enable TLS via whichever build flag is provided. - -t, --tracing Whether to enable tracing via a local Jeager instance. See tests/docker-compose.yml to - start up a local Jaeger instance. + -T, --tracing Whether to enable tracing via a local Jeager instance. See tests/docker-compose.yml to start up a + local Jaeger instance. -V, --version Prints version information OPTIONS: -a, --auth The password/key to use. `REDIS_USERNAME` and `REDIS_PASSWORD` can also be used. + --bounded The size of the bounded mpsc channel used to route commands. [default: 0] -c, --concurrency The number of Tokio tasks used to run commands. [default: 100] -n, --commands The number of commands to run. [default: 100000] -h, --host The hostname of the redis server. [default: 127.0.0.1] -P, --pool The number of clients in the redis connection pool. [default: 1] -p, --port The port for the redis server. [default: 6379] -u, --unix-sock The path to a unix socket. - -SUBCOMMANDS: - help Prints this message or the help of the given subcommand(s) - no-pipeline Run the test without pipelining [Default]. - pipeline Run the test with pipelining. 
``` ## Examples @@ -102,62 +98,19 @@ All the examples below use the following parameters: * 10_000 Tokio tasks * 15 clients in the connection pool -With `auto_pipeline` **disabled**: - -``` -$ ./run.sh --cluster -c 10000 -n 10000000 -P 15 -h redis-cluster-1 -p 30001 -a bar no-pipeline -Performed 10000000 operations in: 27.038434665s. Throughput: 369849 req/sec -``` - -With `auto_pipeline` **enabled**: - ``` -$ ./run.sh --cluster -c 10000 -n 10000000 -P 15 -h redis-cluster-1 -p 30001 -a bar pipeline -Performed 10000000 operations in: 3.728232639s. Throughput: 2682403 req/sec +$ ./run.sh --cluster -c 10000 -n 10000000 -P 15 -h redis-cluster-1 -p 30001 -a bar +Performed 10000000 operations in: 3.337158005s. Throughput: 2996703 req/sec ``` -With `auto_pipeline` **enabled** and using `GET` with replica nodes instead of `INCR` with primary nodes: +Using `GET` with replica nodes instead of `INCR` with primary nodes: ``` -$ ./run.sh --cluster -c 10000 -n 10000000 -P 15 -h redis-cluster-1 -p 30001 -a bar --replicas pipeline -erformed 10000000 operations in: 3.234255482s. Throughput: 3092145 req/sec +$ ./run.sh --cluster -c 10000 -n 10000000 -P 15 -h redis-cluster-1 -p 30001 -a bar --replicas +Performed 10000000 operations in: 1.865807963s. Throughput: 5361930 req/sec ``` -Maybe Relevant Specs: +Relevant Specs: * 32 CPUs -* 64 GB memory - -## `redis-rs` Comparison - -The `USE_REDIS_RS` environment variable can be toggled to [switch the benchmark logic](./src/_redis.rs) to -use `redis-rs` instead of `fred`. There's also an `info` level log line that can confirm this at runtime. - -The `redis-rs` variant uses the same general strategy, but with [bb8-redis](https://crates.io/crates/bb8-redis) ( -specifically `Pool`) instead of `fred::clients::RedisPool`. All the other components -in the benchmark logic are the same. 
- -### Examples - -These examples use the following parameters: - -* Centralized deployment via local docker -* No tracing features enabled -* No TLS -* 10_000_000 INCR commands with `assert-expected` enabled -* 10_000 Tokio tasks -* 15 clients in the connection pool - -``` -# fred without `auto_pipeline` -$ ./run.sh -h redis-main -p 6379 -a bar -n 10000000 -P 15 -c 10000 no-pipeline -Performed 10000000 operations in: 52.156700826s. Throughput: 191732 req/sec - -# redis-rs via bb8-redis -$ USE_REDIS_RS=1 ./run.sh -h redis-main -p 6379 -a bar -n 10000000 -P 15 -c 10000 -Performed 10000000 operations in: 102.953612933s. Throughput: 97131 req/sec - -# fred with `auto_pipeline` -$ ./run.sh -h redis-main -p 6379 -a bar -n 10000000 -P 15 -c 10000 pipeline -Performed 10000000 operations in: 5.74236423s. Throughput: 1741553 req/sec -``` \ No newline at end of file +* 64 GB memory \ No newline at end of file diff --git a/bin/benchmark/cli.yml b/bin/benchmark/cli.yml index 61bbfb57..fb3f2f5c 100644 --- a/bin/benchmark/cli.yml +++ b/bin/benchmark/cli.yml @@ -4,7 +4,7 @@ author: Alec Embke about: A benchmarking module based on the `redis-benchmark` tool included with Redis. args: - tracing: - short: t + short: T long: tracing help: Whether to enable tracing via a local Jeager instance. See tests/docker-compose.yml to start up a local Jaeger instance. takes_value: false @@ -16,6 +16,12 @@ args: long: replicas help: Whether to use `GET` with replica nodes instead of `INCR` with primary nodes. takes_value: false + - bounded: + long: bounded + help: The size of the bounded mpsc channel used to route commands. + takes_value: true + value_name: "NUMBER" + default_value: "0" - quiet: short: q long: quiet @@ -72,9 +78,4 @@ args: long: auth value_name: "STRING" help: The password/key to use. `REDIS_USERNAME` and `REDIS_PASSWORD` can also be used. - takes_value: true -subcommands: - - pipeline: - about: Run the test with pipelining. 
- - no-pipeline: - about: Run the test without pipelining [Default]. \ No newline at end of file + takes_value: true \ No newline at end of file diff --git a/bin/benchmark/run.sh b/bin/benchmark/run.sh index 9a26e3cf..e8c306a0 100755 --- a/bin/benchmark/run.sh +++ b/bin/benchmark/run.sh @@ -1,10 +1,17 @@ #!/bin/bash -[[ -z "${USE_REDIS_RS}" ]] && FEATURES="assert-expected" || FEATURES="assert-expected redis-rs" +[[ -z "${REDIS_RS_BB8}" ]] && FEATURES="assert-expected" || FEATURES="assert-expected redis-rs" +if [[ ! -z "${REDIS_RS_MANAGER}" ]]; then + FEATURES="assert-expected redis-rs redis-manager" +fi + +# echo 0 | sudo tee /proc/sys/kernel/kptr_restrict +# echo "-1" | sudo tee /proc/sys/kernel/perf_event_paranoid echo $FEATURES docker-compose -f ../../tests/docker/compose/cluster.yml \ -f ../../tests/docker/compose/centralized.yml \ - -f ../../tests/docker/compose/unix-socket.yml -f ./docker-compose.yml \ + -f ../../tests/docker/compose/unix-socket.yml \ + -f ./docker-compose.yml \ run -u $(id -u ${USER}):$(id -g ${USER}) --rm fred-benchmark cargo run --release --features "$FEATURES" -- "${@:1}" \ No newline at end of file diff --git a/bin/benchmark/src/_fred.rs b/bin/benchmark/src/_fred.rs index 99cd028e..59b00036 100644 --- a/bin/benchmark/src/_fred.rs +++ b/bin/benchmark/src/_fred.rs @@ -1,5 +1,11 @@ use crate::{utils, Argv}; -use fred::{clients::RedisPool, error::RedisError, prelude::*, types::Builder as RedisBuilder}; +use fred::{ + clients::Pool, + error::Error, + prelude::*, + types::{Builder, ClusterDiscoveryPolicy}, +}; +use futures::TryStreamExt; use indicatif::ProgressBar; use std::{ error::Error, @@ -8,14 +14,12 @@ use std::{ }; use tokio::task::JoinHandle; -use fred::types::ClusterDiscoveryPolicy; #[cfg(any( feature = "enable-rustls", feature = "enable-native-tls", feature = "enabled-rustls-ring" ))] use fred::types::{TlsConfig, TlsConnector, TlsHostMapping}; -use futures::TryStreamExt; #[cfg(feature = "enable-rustls")] fn default_tls_config() -> 
TlsConfig { @@ -35,7 +39,7 @@ fn default_tls_config() -> TlsConfig { pub async fn init(argv: &Arc) -> Result { let (username, password) = utils::read_auth_env(); - let config = RedisConfig { + let config = Config { fail_fast: true, server: if argv.unix.is_some() { ServerConfig::Unix { @@ -62,20 +66,16 @@ pub async fn init(argv: &Arc) -> Result { ..Default::default() }; - let pool = RedisBuilder::from_config(config) - .with_performance_config(|config| { - config.auto_pipeline = argv.pipeline; - config.backpressure.max_in_flight_commands = 100_000_000; - }) + let pool = Builder::from_config(config) .with_connection_config(|config| { + config.max_command_buffer_len = argv.bounded; config.internal_command_timeout = Duration::from_secs(5); }) .set_policy(ReconnectPolicy::new_constant(0, 500)) .build_pool(argv.pool)?; info!("Connecting to {}:{}...", argv.host, argv.port); - pool.connect(); - pool.wait_for_connect().await?; + pool.init().await?; info!("Connected to {}:{}.", argv.host, argv.port); pool.flushall_cluster().await?; @@ -84,7 +84,7 @@ pub async fn init(argv: &Arc) -> Result { fn spawn_client_task( bar: &Option, - client: &RedisClient, + client: &Client, counter: &Arc, argv: &Arc, ) -> JoinHandle<()> { @@ -113,6 +113,7 @@ fn spawn_client_task( bar.inc(1); } } + debug!("Ending client task"); }) } diff --git a/bin/benchmark/src/_redis.rs b/bin/benchmark/src/_redis.rs index 9e0f4ec5..3cd0c918 100644 --- a/bin/benchmark/src/_redis.rs +++ b/bin/benchmark/src/_redis.rs @@ -1,4 +1,5 @@ use crate::{utils, Argv}; +#[cfg(not(feature = "redis-manager"))] use bb8_redis::{ bb8::{self, Pool, PooledConnection}, redis::{cmd, AsyncCommands, ErrorKind as RedisErrorKind, RedisError}, @@ -6,7 +7,17 @@ use bb8_redis::{ }; use futures::TryStreamExt; use indicatif::ProgressBar; -use opentelemetry::trace::FutureExt; +use redis::aio::ConnectionManager; +#[cfg(feature = "redis-manager")] +use redis::{ + aio::{ConnectionManager as RedisConnectionManager, ConnectionManagerConfig, 
MultiplexedConnection}, + cmd, + AsyncCommands, + Client as RedisClient, + ConnectionLike, + ErrorKind as RedisErrorKind, + RedisError, +}; use std::{ error::Error, sync::{atomic::AtomicUsize, Arc}, @@ -14,6 +25,7 @@ use std::{ }; use tokio::task::JoinHandle; +#[cfg(not(feature = "redis-manager"))] async fn incr_key(pool: &Pool, key: &str) -> i64 { let mut conn = pool.get().await.map_err(utils::crash).unwrap(); cmd("INCR") @@ -24,6 +36,7 @@ async fn incr_key(pool: &Pool, key: &str) -> i64 { .unwrap() } +#[cfg(not(feature = "redis-manager"))] async fn del_key(pool: &Pool, key: &str) -> i64 { let mut conn = pool.get().await.map_err(utils::crash).unwrap(); cmd("DEL") @@ -34,6 +47,7 @@ async fn del_key(pool: &Pool, key: &str) -> i64 { .unwrap() } +#[cfg(not(feature = "redis-manager"))] fn spawn_client_task( bar: &Option, pool: &Pool, @@ -65,7 +79,7 @@ fn spawn_client_task( }) } -// TODO support clustered deployments +#[cfg(not(feature = "redis-manager"))] async fn init(argv: &Arc) -> Pool { let (username, password) = utils::read_auth_env(); let url = if let Some(password) = password { @@ -94,6 +108,7 @@ async fn init(argv: &Arc) -> Pool { pool } +#[cfg(not(feature = "redis-manager"))] pub async fn run(argv: Arc, counter: Arc, bar: Option) -> Duration { info!("Running with redis-rs"); @@ -114,3 +129,95 @@ pub async fn run(argv: Arc, counter: Arc, bar: Option i64 { + conn.incr(key, 1).await.map_err(utils::crash).unwrap() +} + +#[cfg(feature = "redis-manager")] +async fn del_key(conn: &mut ConnectionManager, key: &str) -> i64 { + conn.del(key).await.map_err(utils::crash).unwrap() +} + +#[cfg(feature = "redis-manager")] +fn spawn_client_task( + bar: &Option, + client: &ConnectionManager, + counter: &Arc, + argv: &Arc, +) -> JoinHandle<()> { + let (bar, mut client, counter, argv) = (bar.clone(), client.clone(), counter.clone(), argv.clone()); + + tokio::spawn(async move { + let key = utils::random_string(15); + let mut expected = 0; + + while utils::incr_atomic(&counter) < 
argv.count { + expected += 1; + let actual = incr_key(&mut client, &key).await; + + #[cfg(feature = "assert-expected")] + { + if actual != expected { + println!("Unexpected result: {} == {}", actual, expected); + std::process::exit(1); + } + } + + if let Some(ref bar) = bar { + bar.inc(1); + } + } + }) +} + +#[cfg(feature = "redis-manager")] +async fn init(argv: &Arc) -> ConnectionManager { + let (username, password) = utils::read_auth_env(); + let url = if let Some(password) = password { + let username = username.map(|s| format!("{s}:")).unwrap_or("".into()); + format!("redis://{}{}@{}:{}", username, password, argv.host, argv.port) + } else { + format!("redis://{}:{}", argv.host, argv.port) + }; + debug!("Redis conn: {}", url); + + let client = RedisClient::open(url).expect("Failed to create redis client"); + let config = ConnectionManagerConfig::new() + .set_connection_timeout(Duration::from_secs(5)) + .set_response_timeout(Duration::from_secs(5)) + .set_number_of_retries(1000) + .set_exponent_base(2); + + ConnectionManager::new_with_config(client, config) + .await + .expect("Failed to create connection manager") +} + +#[cfg(feature = "redis-manager")] +pub async fn run(argv: Arc, counter: Arc, bar: Option) -> Duration { + info!("Running with redis-rs"); + + if argv.cluster || argv.replicas { + panic!("Cluster or replica features are not supported yet with redis-rs benchmarks."); + } + if argv.pool > 1 { + panic!("Pooling is not supported with redis-manager feature."); + } + let manager = init(&argv).await; + let mut tasks = Vec::with_capacity(argv.tasks); + + info!("Starting commands..."); + let started = SystemTime::now(); + for _ in 0 .. 
argv.tasks { + tasks.push(spawn_client_task(&bar, &manager, &counter, &argv)); + } + futures::future::join_all(tasks).await; + + SystemTime::now() + .duration_since(started) + .expect("Failed to calculate duration") +} diff --git a/bin/benchmark/src/main.rs b/bin/benchmark/src/main.rs index 2b1c56b6..5bcdc21a 100644 --- a/bin/benchmark/src/main.rs +++ b/bin/benchmark/src/main.rs @@ -1,16 +1,11 @@ -// shh #![allow(warnings)] +#[cfg(feature = "dhat-heap")] +#[global_allocator] +static ALLOC: dhat::Alloc = dhat::Alloc; + #[macro_use] extern crate clap; -extern crate fred; -extern crate futures; -extern crate opentelemetry; -extern crate opentelemetry_jaeger; -extern crate tokio; -extern crate tracing; -extern crate tracing_opentelemetry; -extern crate tracing_subscriber; #[macro_use] extern crate log; @@ -21,6 +16,7 @@ use fred::types::TracingConfig; use clap::App; use indicatif::ProgressBar; +#[cfg(feature = "tracing-deps")] use opentelemetry::{ global, sdk::{ @@ -29,6 +25,7 @@ use opentelemetry::{ trace::{self, RandomIdGenerator, Sampler, TraceRuntime}, }, }; +#[cfg(feature = "tracing-deps")] use opentelemetry_jaeger::JaegerTraceRuntime; use std::{ default::Default, @@ -38,6 +35,7 @@ use std::{ time::{Duration, SystemTime}, }; use tokio::{runtime::Builder, task::JoinHandle, time::Instant}; +#[cfg(feature = "tracing-deps")] use tracing_subscriber::{layer::SubscriberExt, Layer, Registry}; static DEFAULT_COMMAND_COUNT: usize = 10_000; @@ -66,18 +64,18 @@ use _redis::run as run_benchmark; // TODO update clap #[derive(Debug)] struct Argv { - pub cluster: bool, + pub cluster: bool, pub replicas: bool, - pub tracing: bool, - pub count: usize, - pub tasks: usize, - pub unix: Option, - pub host: String, - pub port: u16, - pub pipeline: bool, - pub pool: usize, - pub quiet: bool, - pub auth: Option, + pub bounded: usize, + pub tracing: bool, + pub count: usize, + pub tasks: usize, + pub unix: Option, + pub host: String, + pub port: u16, + pub pool: usize, + pub quiet: bool, + 
pub auth: Option, } fn parse_argv() -> Arc { @@ -121,8 +119,11 @@ fn parse_argv() -> Arc { .value_of("pool") .map(|v| v.parse::().expect("Invalid pool")) .unwrap_or(1); + let bounded = matches + .value_of("bounded") + .map(|v| v.parse::().expect("Invalid bounded value")) + .unwrap_or(0); let auth = matches.value_of("auth").map(|v| v.to_owned()); - let pipeline = matches.subcommand_matches("pipeline").is_some(); Arc::new(Argv { cluster, @@ -133,7 +134,7 @@ fn parse_argv() -> Arc { tasks, host, port, - pipeline, + bounded, pool, replicas, auth, @@ -197,39 +198,63 @@ pub fn setup_tracing(enable: bool) { } fn main() { + #[cfg(feature = "dhat-heap")] + let _profiler = dhat::Profiler::new_heap(); + pretty_env_logger::init(); + #[cfg(feature = "console")] + console_subscriber::init(); + let argv = parse_argv(); info!("Running with configuration: {:?}", argv); + thread::spawn(move || { + let sch = Builder::new_multi_thread().enable_all().build().unwrap(); + sch.block_on(async move { + tokio::spawn(async move { + #[cfg(feature = "metrics")] + let monitor = tokio_metrics::RuntimeMonitor::new(&tokio::runtime::Handle::current()); - let sch = Builder::new_multi_thread().enable_all().build().unwrap(); - sch.block_on(async move { - tokio::spawn(async move { - setup_tracing(argv.tracing); - let counter = Arc::new(AtomicUsize::new(0)); - let bar = if argv.quiet { - None - } else { - Some(ProgressBar::new(argv.count as u64)) - }; - - let duration = run_benchmark(argv.clone(), counter, bar.clone()).await; - let duration_sec = duration.as_secs() as f64 + (duration.subsec_millis() as f64 / 1000.0); - if let Some(bar) = bar { - bar.finish(); - } - - if argv.quiet { - println!("{}", (argv.count as f64 / duration_sec) as u64); - } else { - println!( - "Performed {} operations in: {:?}. 
Throughput: {} req/sec", - argv.count, - duration, - (argv.count as f64 / duration_sec) as u64 - ); - } - global::shutdown_tracer_provider(); - }) - .await; - }); + setup_tracing(argv.tracing); + let counter = Arc::new(AtomicUsize::new(0)); + let bar = if argv.quiet { + None + } else { + Some(ProgressBar::new(argv.count as u64)) + }; + + #[cfg(feature = "metrics")] + let monitor_jh = tokio::spawn(async move { + for interval in monitor.intervals() { + println!("{:?}", interval); + tokio::time::sleep(Duration::from_secs(2)).await; + } + }); + + let duration = run_benchmark(argv.clone(), counter, bar.clone()).await; + let duration_sec = duration.as_secs() as f64 + (duration.subsec_millis() as f64 / 1000.0); + if let Some(bar) = bar { + bar.finish(); + } + + #[cfg(feature = "metrics")] + monitor_jh.abort(); + + if argv.quiet { + println!("{}", (argv.count as f64 / duration_sec) as u64); + } else { + println!( + "Performed {} operations in: {:?}. Throughput: {} req/sec", + argv.count, + duration, + (argv.count as f64 / duration_sec) as u64 + ); + } + #[cfg(feature = "tracing-deps")] + global::shutdown_tracer_provider(); + }) + .await; + }); + }) + .join() + .unwrap(); } diff --git a/bin/benchmark/valgrind.sh b/bin/benchmark/valgrind.sh new file mode 100755 index 00000000..a5d0a513 --- /dev/null +++ b/bin/benchmark/valgrind.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +# may need to run as root. +# echo 0 > /proc/sys/kernel/kptr_restrict + +cargo build --release --features assert-expected +valgrind --tool=callgrind target/release/fred_benchmark -h 127.0.0.1 -p 6379 -a bar -n 100000 -P 1 -q -c 10000 pipeline +kcachegrind \ No newline at end of file diff --git a/bin/inf_loop/Cargo.toml b/bin/inf_loop/Cargo.toml index 153cac55..df411417 100644 --- a/bin/inf_loop/Cargo.toml +++ b/bin/inf_loop/Cargo.toml @@ -24,11 +24,12 @@ tracing-subscriber = "0.3.16" tracing = "0.1.37" [dependencies.fred] -path = "../.." -#path = "/fred" +#path = "../.." 
+path = "/fred" features = ["network-logs", "debug-ids", "replicas", "i-all", "sentinel-auth"] [features] +default = [] stdout-tracing = ["fred/partial-tracing"] partial-tracing = ["fred/partial-tracing"] full-tracing = ["fred/full-tracing"] \ No newline at end of file diff --git a/bin/inf_loop/docker-compose.yml b/bin/inf_loop/docker-compose.yml index 0bb31564..91d3e01c 100644 --- a/bin/inf_loop/docker-compose.yml +++ b/bin/inf_loop/docker-compose.yml @@ -13,7 +13,7 @@ services: REDIS_VERSION: "${REDIS_VERSION}" networks: - fred-tests - entrypoint: "cargo run --release --features partial-tracing -- ${TEST_ARGV}" + entrypoint: "cargo run --release -- ${TEST_ARGV}" environment: RUST_LOG: "${RUST_LOG}" REDIS_VERSION: "${REDIS_VERSION}" diff --git a/bin/inf_loop/src/main.rs b/bin/inf_loop/src/main.rs index 447c7503..2d750992 100644 --- a/bin/inf_loop/src/main.rs +++ b/bin/inf_loop/src/main.rs @@ -12,7 +12,7 @@ use clap::App; use fred::{ bytes::Bytes, prelude::*, - types::{ReplicaConfig, UnresponsiveConfig}, + types::config::{ReplicaConfig, UnresponsiveConfig}, }; use opentelemetry::{ global, @@ -29,16 +29,16 @@ use tracing_subscriber::{layer::SubscriberExt, Layer, Registry}; #[derive(Debug)] struct Argv { - pub cluster: bool, - pub replicas: bool, - pub host: String, - pub port: u16, - pub pool: usize, - pub interval: u64, - pub wait: u64, - pub auth: String, - pub tracing: bool, - pub sentinel: Option, + pub cluster: bool, + pub replicas: bool, + pub host: String, + pub port: u16, + pub pool: usize, + pub interval: u64, + pub wait: u64, + pub auth: String, + pub tracing: bool, + pub sentinel: Option, pub sentinel_auth: Option, } @@ -146,13 +146,13 @@ pub fn setup_tracing(enable: bool) { } #[tokio::main] -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { pretty_env_logger::init_timed(); let argv = parse_argv(); info!("Running with configuration: {:?}", argv); setup_tracing(argv.tracing); - let config = RedisConfig { + let config = 
Config { #[cfg(any(feature = "partial-tracing", feature = "stdout-tracing", feature = "full-tracing"))] tracing: TracingConfig { enabled: argv.tracing, @@ -164,9 +164,9 @@ async fn main() -> Result<(), RedisError> { if let Some(sentinel) = argv.sentinel.as_ref() { ServerConfig::Sentinel { service_name: sentinel.to_string(), - hosts: vec![Server::new(&argv.host, argv.port)], - password: argv.sentinel_auth.clone(), - username: None, + hosts: vec![Server::new(&argv.host, argv.port)], + password: argv.sentinel_auth.clone(), + username: None, } } else { ServerConfig::new_centralized(&argv.host, argv.port) @@ -183,23 +183,21 @@ async fn main() -> Result<(), RedisError> { .with_connection_config(|config| { config.max_command_attempts = 3; config.unresponsive = UnresponsiveConfig { - interval: Duration::from_secs(1), + interval: Duration::from_secs(1), max_timeout: Some(Duration::from_secs(5)), }; config.connection_timeout = Duration::from_secs(3); config.internal_command_timeout = Duration::from_secs(2); - //config.cluster_cache_update_delay = Duration::from_secs(20); + // config.cluster_cache_update_delay = Duration::from_secs(20); if argv.replicas { config.replica = ReplicaConfig { lazy_connections: true, primary_fallback: true, - connection_error_count: 1, ..Default::default() }; } }) .with_performance_config(|config| { - config.auto_pipeline = true; config.default_command_timeout = Duration::from_secs(60 * 5); }) .set_policy(ReconnectPolicy::new_linear(0, 5000, 100)) diff --git a/bin/replica_consistency/src/main.rs b/bin/replica_consistency/src/main.rs index c2a08736..75ae4ee3 100644 --- a/bin/replica_consistency/src/main.rs +++ b/bin/replica_consistency/src/main.rs @@ -12,7 +12,7 @@ use clap::App; use fred::{ bytes::Bytes, prelude::*, - types::{BackpressureConfig, ReconnectError, ReplicaConfig, UnresponsiveConfig}, + types::{ReconnectError, ReplicaConfig, UnresponsiveConfig}, }; use rand::{self, distributions::Alphanumeric, Rng}; use std::{ @@ -112,11 +112,6 @@ async 
fn main() -> Result<(), RedisError> { ]; }) .with_performance_config(|config| { - config.auto_pipeline = true; - config.backpressure = BackpressureConfig { - max_in_flight_commands: 50_000_000, - ..Default::default() - }; config.default_command_timeout = Duration::from_secs(60); }) .set_policy(ReconnectPolicy::new_constant(0, 50)) diff --git a/examples/axum.rs b/examples/axum.rs index 8d9f5a2d..9226c744 100644 --- a/examples/axum.rs +++ b/examples/axum.rs @@ -6,7 +6,7 @@ use axum::{ Router, }; use bytes::Bytes; -use fred::{clients::RedisPool, prelude::*}; +use fred::{clients::Pool, prelude::*}; use log::{debug, info}; use std::{env, str, time::Duration}; use tokio::net::TcpListener; @@ -14,7 +14,7 @@ use tokio::net::TcpListener; #[derive(Clone)] struct AppState { // all client types are cheaply cloneable - pub pool: RedisPool, + pub pool: Pool, } #[tokio::main] @@ -25,8 +25,7 @@ async fn main() { .ok() .and_then(|v| v.parse::().ok()) .unwrap_or(8); - let config = - RedisConfig::from_url("redis://foo:bar@127.0.0.1:6379").expect("Failed to create redis config from url"); + let config = Config::from_url("redis://foo:bar@127.0.0.1:6379").expect("Failed to create redis config from url"); let pool = Builder::from_config(config) .with_connection_config(|config| { config.connection_timeout = Duration::from_secs(10); @@ -51,9 +50,9 @@ async fn main() { axum::serve(listener, app).await.unwrap(); } -fn map_error(err: RedisError) -> (u16, Body) { +fn map_error(err: Error) -> (u16, Body) { let details: Body = err.details().to_string().into(); - let code = if *err.kind() == RedisErrorKind::NotFound { + let code = if *err.kind() == ErrorKind::NotFound { 404 } else if err.details().starts_with("WRONGTYPE") { 400 diff --git a/examples/basic.rs b/examples/basic.rs index d7ce7c1a..1c7d9150 100644 --- a/examples/basic.rs +++ b/examples/basic.rs @@ -2,15 +2,36 @@ #![allow(clippy::let_underscore_future)] use fred::prelude::*; +use std::time::Duration; #[tokio::main] -async fn main() -> 
Result<(), RedisError> { +async fn main() -> Result<(), Error> { // create a config from a URL - let config = RedisConfig::from_url("redis://username:password@foo.com:6379/1")?; + let config = Config::from_url("redis://username:password@foo.com:6379/1")?; // see the `Builder` interface for more information - let client = Builder::from_config(config).build()?; + let client = Builder::from_config(config) + .with_connection_config(|config| { + config.connection_timeout = Duration::from_secs(5); + config.tcp = TcpConfig { + nodelay: Some(true), + ..Default::default() + }; + }) + .build()?; + client.init().await?; // callers can manage the tokio task driving the connections let _connection_task = client.init().await?; + + // respond to out-of-band connection errors + client.on_error(|(error, server)| async move { + println!("{:?}: Connection error: {:?}", server, error); + Ok(()) + }); + client.on_reconnect(|server| async move { + println!("Reconnected to {}", server); + Ok(()) + }); + // convert response types to most common rust types let foo: Option = client.get("foo").await?; println!("Foo: {:?}", foo); diff --git a/examples/blocking.rs b/examples/blocking.rs index 024b127c..5d182e49 100644 --- a/examples/blocking.rs +++ b/examples/blocking.rs @@ -6,11 +6,11 @@ use std::time::Duration; use tokio::time::sleep; #[tokio::main] -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { pretty_env_logger::init(); - let publisher_client = RedisClient::default(); - let subscriber_client = RedisClient::default(); + let publisher_client = Client::default(); + let subscriber_client = Client::default(); publisher_client.init().await?; subscriber_client.init().await?; diff --git a/examples/client_tracking.rs b/examples/client_tracking.rs index 869c5496..34aa76c1 100644 --- a/examples/client_tracking.rs +++ b/examples/client_tracking.rs @@ -6,7 +6,7 @@ use fred::{interfaces::TrackingInterface, prelude::*, types::RespVersion}; // that requires RESP3 and 
works with all deployment types, and a lower level interface that directly exposes the // `CLIENT TRACKING` commands but often requires a centralized server config. -async fn resp3_tracking_interface_example() -> Result<(), RedisError> { +async fn resp3_tracking_interface_example() -> Result<(), Error> { let client = Builder::default_centralized() .with_config(|config| { config.version = RespVersion::RESP3; @@ -46,8 +46,8 @@ async fn resp3_tracking_interface_example() -> Result<(), RedisError> { Ok(()) } -async fn resp2_basic_interface_example() -> Result<(), RedisError> { - let subscriber = RedisClient::default(); +async fn resp2_basic_interface_example() -> Result<(), Error> { + let subscriber = Client::default(); let client = subscriber.clone_new(); // RESP2 requires two connections @@ -69,7 +69,6 @@ async fn resp2_basic_interface_example() -> Result<(), RedisError> { // enable client tracking, sending invalidation messages to the subscriber client let (_, connection_id) = subscriber .connection_ids() - .await .into_iter() .next() .expect("Failed to read subscriber connection ID"); @@ -77,7 +76,7 @@ async fn resp2_basic_interface_example() -> Result<(), RedisError> { .client_tracking("on", Some(connection_id), None, false, false, false, false) .await?; - println!("Tracking info: {:?}", client.client_trackinginfo::().await?); + println!("Tracking info: {:?}", client.client_trackinginfo::().await?); println!("Redirection: {}", client.client_getredir::().await?); let pipeline = client.pipeline(); @@ -91,7 +90,7 @@ async fn resp2_basic_interface_example() -> Result<(), RedisError> { #[tokio::main] // see https://redis.io/docs/manual/client-side-caching/ -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { resp3_tracking_interface_example().await?; resp2_basic_interface_example().await?; diff --git a/examples/custom.rs b/examples/custom.rs index 35f095db..0f4d0b10 100644 --- a/examples/custom.rs +++ b/examples/custom.rs @@ -9,7 +9,7 @@ 
use fred::{ use std::convert::TryInto; #[tokio::main] -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { let client = Builder::default_centralized().build()?; client.init().await?; let _: () = client.lpush("foo", vec![1, 2, 3]).await?; @@ -29,6 +29,6 @@ async fn main() -> Result<(), RedisError> { .custom_raw(command, vec!["foo", "0", "3"]) .await .and_then(|frame| frame.try_into()) - .and_then(|value: RedisValue| value.convert())?; + .and_then(|value: Value| value.convert())?; Ok(()) } diff --git a/examples/dns.rs b/examples/dns.rs index 0ab80740..f8a77f80 100644 --- a/examples/dns.rs +++ b/examples/dns.rs @@ -22,7 +22,7 @@ impl Default for HickoryDnsResolver { #[async_trait] impl Resolve for HickoryDnsResolver { - async fn resolve(&self, host: Str, port: u16) -> Result, RedisError> { + async fn resolve(&self, host: Str, port: u16) -> Result, Error> { Ok( self .0 @@ -36,7 +36,7 @@ impl Resolve for HickoryDnsResolver { } #[tokio::main] -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { let client = Builder::default_centralized().build()?; client.set_resolver(Arc::new(HickoryDnsResolver::default())).await; client.init().await?; diff --git a/examples/events.rs b/examples/events.rs index 27e19b2c..435e94ae 100644 --- a/examples/events.rs +++ b/examples/events.rs @@ -20,15 +20,15 @@ use tokio_stream::wrappers::BroadcastStream; /// how one might combine multiple receiver streams in a `RedisPool` to minimize the overhead of new tokio tasks for /// each underlying client. 
#[tokio::main] -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { let client = Builder::default_centralized().build()?; // use the on_* functions - let _reconnect_task = client.on_reconnect(|server| { + let _reconnect_task = client.on_reconnect(|server| async move { println!("Reconnected to {}", server); Ok(()) }); - let _error_task = client.on_error(|error| { + let _error_task = client.on_error(|error| async move { println!("Connection error: {:?}", error); Ok(()) }); @@ -59,7 +59,7 @@ async fn main() -> Result<(), RedisError> { /// Shows how to combine multiple event streams from multiple clients into one tokio task. #[allow(dead_code)] -async fn setup_pool() -> Result<(), RedisError> { +async fn setup_pool() -> Result<(), Error> { let pool = Builder::default_centralized().build_pool(5)?; // `select_all` does most of the work here but requires that the channel receivers implement `Stream`. the diff --git a/examples/glommio.rs b/examples/glommio.rs index 031e32fb..372a2e1d 100644 --- a/examples/glommio.rs +++ b/examples/glommio.rs @@ -15,7 +15,7 @@ const COUNT: usize = 100_000_000; fn main() { pretty_env_logger::init(); - let config = RedisConfig::from_url("redis-cluster://foo:bar@redis-cluster-1:30001").unwrap(); + let config = Config::from_url("redis-cluster://foo:bar@redis-cluster-1:30001").unwrap(); let builder = Builder::from_config(config); let started = SystemTime::now(); @@ -43,7 +43,7 @@ fn main() { incr_foo(&pool).await?; pool.quit().await?; - Ok::<_, RedisError>(thread_id) + Ok::<_, Error>(thread_id) } }) .unwrap() @@ -65,7 +65,7 @@ fn main() { ); } -async fn incr_foo(pool: &RedisPool) -> Result<(), RedisError> { +async fn incr_foo(pool: &Pool) -> Result<(), Error> { let counter = Rc::new(RefCell::new(0)); let mut tasks = Vec::with_capacity(CONCURRENCY); for _ in 0 .. 
CONCURRENCY { @@ -77,7 +77,7 @@ async fn incr_foo(pool: &RedisPool) -> Result<(), RedisError> { *counter.borrow_mut() += 1; } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) })); } try_join_all(tasks).await?; diff --git a/examples/keyspace.rs b/examples/keyspace.rs index 25be8413..dd21ce04 100644 --- a/examples/keyspace.rs +++ b/examples/keyspace.rs @@ -17,19 +17,19 @@ use tokio::time::sleep; /// /// Both examples assume that the server has been configured to emit keyspace events (via `notify-keyspace-events`). #[tokio::main] -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { clustered_keyspace_events().await?; centralized_keyspace_events().await?; Ok(()) } -async fn fake_traffic(client: &RedisClient, amount: usize) -> Result<(), RedisError> { +async fn fake_traffic(client: &Client, amount: usize) -> Result<(), Error> { // use a new client since the provided client is subscribed to keyspace events let client = client.clone_new(); client.init().await?; for idx in 0 .. 
amount { - let key: RedisKey = format!("foo-{}", idx).into(); + let key: Key = format!("foo-{}", idx).into(); let _: () = client.set(&key, 1, None, None, false).await?; let _: () = client.incr(&key).await?; @@ -40,7 +40,7 @@ async fn fake_traffic(client: &RedisClient, amount: usize) -> Result<(), RedisEr Ok(()) } -async fn centralized_keyspace_events() -> Result<(), RedisError> { +async fn centralized_keyspace_events() -> Result<(), Error> { let subscriber = Builder::default_centralized().build()?; let reconnect_subscriber = subscriber.clone(); @@ -53,7 +53,7 @@ async fn centralized_keyspace_events() -> Result<(), RedisError> { reconnect_subscriber.psubscribe("__key__*:foo*").await?; } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); // connect after setting up the reconnection logic @@ -71,7 +71,7 @@ async fn centralized_keyspace_events() -> Result<(), RedisError> { ); } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); // generate fake traffic and wait a second @@ -81,7 +81,7 @@ async fn centralized_keyspace_events() -> Result<(), RedisError> { Ok(()) } -async fn clustered_keyspace_events() -> Result<(), RedisError> { +async fn clustered_keyspace_events() -> Result<(), Error> { let subscriber = Builder::default_clustered().build()?; let reconnect_subscriber = subscriber.clone(); @@ -98,7 +98,7 @@ async fn clustered_keyspace_events() -> Result<(), RedisError> { .await?; } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); // connect after setting up the reconnection logic @@ -116,7 +116,7 @@ async fn clustered_keyspace_events() -> Result<(), RedisError> { ); } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); // generate fake traffic and wait a second diff --git a/examples/lua.rs b/examples/lua.rs index 74ee6772..c26cf6e1 100644 --- a/examples/lua.rs +++ b/examples/lua.rs @@ -9,8 +9,8 @@ use fred::{ static SCRIPT: &str = "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}"; #[tokio::main] -async fn main() -> Result<(), RedisError> { - let client = 
RedisClient::default(); +async fn main() -> Result<(), Error> { + let client = Client::default(); client.init().await?; let hash = fred_utils::sha1_hash(SCRIPT); @@ -18,11 +18,11 @@ async fn main() -> Result<(), RedisError> { let _: () = client.script_load(SCRIPT).await?; } - let results: RedisValue = client.evalsha(&hash, vec!["foo", "bar"], vec![1, 2]).await?; + let results: Value = client.evalsha(&hash, vec!["foo", "bar"], vec![1, 2]).await?; println!("Script result for {hash}: {results:?}"); // or use `EVAL` - let results: RedisValue = client.eval(SCRIPT, vec!["foo", "bar"], vec![1, 2]).await?; + let results: Value = client.eval(SCRIPT, vec!["foo", "bar"], vec![1, 2]).await?; println!("Script result: {results:?}"); client.quit().await?; @@ -31,13 +31,13 @@ async fn main() -> Result<(), RedisError> { // or use the `Script` utility types #[allow(dead_code)] -async fn scripts() -> Result<(), RedisError> { - let client = RedisClient::default(); +async fn scripts() -> Result<(), Error> { + let client = Client::default(); client.init().await?; let script = Script::from_lua(SCRIPT); script.load(&client).await?; - let _result: Vec = script.evalsha(&client, vec!["foo", "bar"], vec![1, 2]).await?; + let _result: Vec = script.evalsha(&client, vec!["foo", "bar"], vec![1, 2]).await?; // retry after calling SCRIPT LOAD, if needed let (key1, key2, arg1, arg2): (String, String, i64, i64) = script .evalsha_with_reload(&client, vec!["foo", "bar"], vec![1, 2]) @@ -49,8 +49,8 @@ async fn scripts() -> Result<(), RedisError> { // use the `Function` and `Library` utility types #[allow(dead_code)] -async fn functions() -> Result<(), RedisError> { - let client = RedisClient::default(); +async fn functions() -> Result<(), Error> { + let client = Client::default(); client.init().await?; let echo_lua = include_str!("../tests/scripts/lua/echo.lua"); diff --git a/examples/misc.rs b/examples/misc.rs index 59e16d59..e11218c1 100644 --- a/examples/misc.rs +++ b/examples/misc.rs @@ -1,26 +1,16 
@@ #![allow(clippy::disallowed_names)] #![allow(clippy::let_underscore_future)] -use fred::{ - prelude::*, - types::{BackpressureConfig, BackpressurePolicy, UnresponsiveConfig}, -}; +use fred::{prelude::*, types::config::UnresponsiveConfig}; use std::time::Duration; #[tokio::main] -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { let client = Builder::default_centralized() .with_performance_config(|config| { config.max_feed_count = 100; - config.auto_pipeline = true; // change the buffer size behind the event interface functions (`on_message`, etc.) config.broadcast_channel_capacity = 48; - // allow up to 25000 in-flight commands per connection - config.backpressure = BackpressureConfig { - disable_auto_backpressure: false, - max_in_flight_commands: 25_000, - policy: BackpressurePolicy::Drain, - } }) .with_connection_config(|config| { config.tcp = TcpConfig { @@ -46,15 +36,15 @@ async fn main() -> Result<(), RedisError> { // run all event listener functions in one task let _events_task = client.on_any( - |error| { + |error| async move { println!("Connection error: {:?}", error); Ok(()) }, - |server| { + |server| async move { println!("Reconnected to {:?}", server); Ok(()) }, - |changes| { + |changes| async move { println!("Cluster changed: {:?}", changes); Ok(()) }, @@ -91,7 +81,7 @@ async fn main() -> Result<(), RedisError> { // interact with specific cluster nodes without creating new connections if client.is_clustered() { // discover connections via the active connection map - let _connections = client.active_connections().await?; + let _connections = client.active_connections(); // or use the cached cluster state from `CLUSTER SLOTS` let connections = client .cached_cluster_state() @@ -104,11 +94,11 @@ async fn main() -> Result<(), RedisError> { } } - // the `RedisValue` type also works as quick way to discover the type signature of a complicated response: + // the `Value` type also works as quick way to discover the type 
signature of a complicated response: println!( "{:?}", client - .xreadgroup::("foo", "bar", None, None, false, "baz", ">") + .xreadgroup::("foo", "bar", None, None, false, "baz", ">") .await? ); diff --git a/examples/monitor.rs b/examples/monitor.rs index e8ffb066..df4139ed 100644 --- a/examples/monitor.rs +++ b/examples/monitor.rs @@ -7,9 +7,9 @@ use std::time::Duration; use tokio::time::sleep; #[tokio::main] -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { let monitor_jh = tokio::spawn(async move { - let config = RedisConfig::default(); + let config = Config::default(); let mut monitor_stream = monitor::run(config).await?; while let Some(command) = monitor_stream.next().await { @@ -17,10 +17,10 @@ async fn main() -> Result<(), RedisError> { println!("{}", command); } - Ok::<(), RedisError>(()) + Ok::<(), Error>(()) }); - let client = RedisClient::default(); + let client = Client::default(); client.init().await?; for idx in 0 .. 50 { diff --git a/examples/pipeline.rs b/examples/pipeline.rs index 100c9383..b837df64 100644 --- a/examples/pipeline.rs +++ b/examples/pipeline.rs @@ -4,17 +4,15 @@ use fred::prelude::*; #[tokio::main] -async fn main() -> Result<(), RedisError> { - // the `auto_pipeline` config option determines whether the client will pipeline commands across tasks. - // this example shows how to pipeline commands within one task. 
- let client = RedisClient::default(); +async fn main() -> Result<(), Error> { + let client = Client::default(); client.init().await?; let pipeline = client.pipeline(); // commands are queued in memory - let result: RedisValue = pipeline.incr("foo").await?; + let result: Value = pipeline.incr("foo").await?; assert!(result.is_queued()); - let result: RedisValue = pipeline.incr("foo").await?; + let result: Value = pipeline.incr("foo").await?; assert!(result.is_queued()); // send the pipeline and return all the results in order diff --git a/examples/pool.rs b/examples/pool.rs index 7142683f..3c5e9edb 100644 --- a/examples/pool.rs +++ b/examples/pool.rs @@ -4,7 +4,7 @@ use fred::prelude::*; #[tokio::main] -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { let pool = Builder::default_centralized().build_pool(5)?; pool.init().await?; @@ -22,7 +22,7 @@ async fn main() -> Result<(), RedisError> { assert_eq!(pipeline.last::().await?, 2); for client in pool.clients() { - println!("{} connected to {:?}", client.id(), client.active_connections().await?); + println!("{} connected to {:?}", client.id(), client.active_connections()); } pool.quit().await?; diff --git a/examples/pubsub.rs b/examples/pubsub.rs index 394f6164..a0d6922e 100644 --- a/examples/pubsub.rs +++ b/examples/pubsub.rs @@ -8,7 +8,7 @@ use std::time::Duration; use tokio::time::sleep; #[tokio::main] -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { let publisher_client = Builder::default_centralized() .with_performance_config(|config| { // change the buffer size of the broadcast channels used by the EventInterface @@ -20,9 +20,9 @@ async fn main() -> Result<(), RedisError> { subscriber_client.init().await?; // or use `message_rx()` to use the underlying `BroadcastReceiver` directly without spawning a new task - let _message_task = subscriber_client.on_message(|message| { + let _message_task = subscriber_client.on_message(|message| async move { 
println!("{}: {}", message.channel, message.value.convert::()?); - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); for idx in 0 .. 50 { @@ -37,7 +37,7 @@ async fn main() -> Result<(), RedisError> { #[cfg(feature = "subscriber-client")] #[allow(dead_code)] -async fn subscriber_example() -> Result<(), RedisError> { +async fn subscriber_example() -> Result<(), Error> { let subscriber = Builder::default_centralized() .with_performance_config(|config| { // tune the size of the buffer behind the pubsub broadcast channels @@ -53,7 +53,7 @@ async fn subscriber_example() -> Result<(), RedisError> { println!("Recv {:?} on channel {}", message.value, message.channel); } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); // spawn a task to sync subscriptions whenever the client reconnects diff --git a/examples/redis_json.rs b/examples/redis_json.rs index 399b5642..dc92a293 100644 --- a/examples/redis_json.rs +++ b/examples/redis_json.rs @@ -6,7 +6,7 @@ use serde_json::{json, Value}; // see the serde-json example for more information on deserializing responses #[tokio::main] -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { let client = Builder::default_centralized().build()?; client.init().await?; diff --git a/examples/replicas.rs b/examples/replicas.rs index d419480a..1385b181 100644 --- a/examples/replicas.rs +++ b/examples/replicas.rs @@ -4,7 +4,11 @@ use fred::{ prelude::*, - types::{ClusterDiscoveryPolicy, ClusterHash, ReplicaConfig, RespVersion}, + types::{ + config::{ClusterDiscoveryPolicy, ReplicaConfig}, + ClusterHash, + RespVersion, + }, util::redis_keyslot, }; use futures::future::try_join_all; @@ -12,10 +16,10 @@ use log::info; use std::collections::HashSet; #[tokio::main] -async fn main() -> Result<(), RedisError> { +async fn main() -> Result<(), Error> { pretty_env_logger::init(); - let config = RedisConfig::from_url("redis-cluster://foo:bar@redis-cluster-1:30001")?; + let config = 
Config::from_url("redis-cluster://foo:bar@redis-cluster-1:30001")?; let pool = Builder::from_config(config) .with_config(|config| { config.version = RespVersion::RESP3; @@ -43,7 +47,7 @@ async fn main() -> Result<(), RedisError> { for idx in 0 .. 1000 { let pool = pool.clone(); ops.push(async move { - let key: RedisKey = format!("foo-{}", idx).into(); + let key: Key = format!("foo-{}", idx).into(); let cluster_hash = ClusterHash::Custom(redis_keyslot(key.as_bytes())); // send WAIT to the cluster node that received SET @@ -59,7 +63,7 @@ async fn main() -> Result<(), RedisError> { let _: () = pipeline.all().await?; assert_eq!(pool.replicas().get::(&key).await?, idx); - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); } try_join_all(ops).await?; @@ -69,7 +73,7 @@ async fn main() -> Result<(), RedisError> { // use one client to demonstrate how lazy connections are created. in this case each primary node is expected to have // one replica. -async fn lazy_connection_example(client: &RedisClient) -> Result<(), RedisError> { +async fn lazy_connection_example(client: &Client) -> Result<(), Error> { let replica_routing = client.replicas().nodes(); let cluster_routing = client .cached_cluster_state() @@ -77,11 +81,11 @@ async fn lazy_connection_example(client: &RedisClient) -> Result<(), RedisError> let expected_primary = cluster_routing .get_server(redis_keyslot(b"foo")) .expect("Failed to read primary node owner for 'foo'"); - let old_connections: HashSet<_> = client.active_connections().await?.into_iter().collect(); + let old_connections: HashSet<_> = client.active_connections().into_iter().collect(); // if `lazy_connections: true` the client creates the connection here let _: () = client.replicas().get("foo").await?; - let new_connections: HashSet<_> = client.active_connections().await?.into_iter().collect(); + let new_connections: HashSet<_> = client.active_connections().into_iter().collect(); let new_servers: Vec<_> = 
new_connections.difference(&old_connections).collect(); // verify that 1 new connection was created, and that it's in the replica map as a replica of the expected primary // node @@ -90,7 +94,7 @@ async fn lazy_connection_example(client: &RedisClient) -> Result<(), RedisError> // update the replica routing table and reset replica connections client.replicas().sync(true).await?; - assert_eq!(old_connections.len(), client.active_connections().await?.len()); + assert_eq!(old_connections.len(), client.active_connections().len()); Ok(()) } diff --git a/examples/scan.rs b/examples/scan.rs index 8988a1ce..f95c5efc 100644 --- a/examples/scan.rs +++ b/examples/scan.rs @@ -2,42 +2,86 @@ #![allow(clippy::let_underscore_future)] #![allow(dead_code)] -use fred::{prelude::*, types::Scanner}; +use bytes_utils::Str; +use fred::{prelude::*, types::scan::Scanner}; use futures::stream::TryStreamExt; -async fn create_fake_data(client: &RedisClient) -> Result<(), RedisError> { +async fn create_fake_data(client: &Client) -> Result<(), Error> { let values: Vec<(String, i64)> = (0 .. 50).map(|i| (format!("foo-{}", i), i)).collect(); client.mset(values).await } -async fn delete_fake_data(client: &RedisClient) -> Result<(), RedisError> { +async fn delete_fake_data(client: &Client) -> Result<(), Error> { let keys: Vec<_> = (0 .. 50).map(|i| format!("foo-{}", i)).collect(); client.del::<(), _>(keys).await?; Ok(()) } -#[tokio::main] -async fn main() -> Result<(), RedisError> { - let client = RedisClient::default(); - client.init().await?; - create_fake_data(&client).await?; - +/// Scan the server, throttling the pagination process so the client only holds one page of keys in memory at a time. +async fn scan_throttled(client: &Client) -> Result<(), Error> { // scan all keys in the keyspace, returning 10 keys per page let mut scan_stream = client.scan("foo*", Some(10), None); while let Some(mut page) = scan_stream.try_next().await? 
{ if let Some(keys) = page.take_results() { - // create a client from the scan result, reusing the existing connection(s) - let client = page.create_client(); - for key in keys.into_iter() { - let value: RedisValue = client.get(&key).await?; + let value: Value = client.get(&key).await?; println!("Scanned {} -> {:?}", key.as_str_lossy(), value); } } - // **important:** move on to the next page now that we're done reading the values - let _ = page.next(); + // callers can call `page.next()` to control when the next page is fetched from the server. if this is not called + // then the next page will be fetched when `page` is dropped. + page.next(); } + Ok(()) +} + +/// Scan the server as quickly as possible, buffering pending keys in memory on the client. +async fn scan_buffered(client: &Client) -> Result<(), Error> { + client + .scan_buffered("foo*", Some(10), None) + .try_for_each_concurrent(10, |key| async move { + let value: Value = client.get(&key).await?; + println!("Scanned {} -> {:?}", key.as_str_lossy(), value); + Ok(()) + }) + .await +} + +/// Example showing how to scan a server one page a time with a custom cursor. 
+async fn scan_with_cursor(client: &Client) -> Result<(), Error> { + let mut cursor: Str = "0".into(); + // break out after 1000 records + let max_keys = 1000; + let mut count = 0; + + loop { + let (new_cursor, keys): (Str, Vec) = client.scan_page(cursor, "*", Some(100), None).await?; + count += keys.len(); + + for key in keys.into_iter() { + let val: Value = client.get(&key).await?; + println!("Scanned {} -> {:?}", key.as_str_lossy(), val); + } + + if count >= max_keys || new_cursor == "0" { + break; + } else { + cursor = new_cursor; + } + } + Ok(()) +} + +#[tokio::main] +async fn main() -> Result<(), Error> { + let client = Client::default(); + client.init().await?; + create_fake_data(&client).await?; + + scan_buffered(&client).await?; + scan_throttled(&client).await?; + scan_with_cursor(&client).await?; delete_fake_data(&client).await?; client.quit().await?; @@ -56,10 +100,10 @@ async fn main() -> Result<(), RedisError> { /// /// The best option depends on several factors, but `scan_cluster` is often the easiest approach for most use /// cases. -async fn pool_scan_cluster_memory_example(pool: &RedisPool) -> Result<(), RedisError> { - // the majority of the client traffic in this scenario comes from the MEMORY USAGE call on each key, so we'll use a - // pool to round-robin these commands among multiple clients. a clustered client with `auto_pipeline: true` can scan - // all nodes in the cluster concurrently, so we use a single client rather than a pool to issue the SCAN calls. +async fn pool_scan_cluster_memory_example(pool: &Pool) -> Result<(), Error> { + // The majority of the client traffic in this scenario comes from the MEMORY USAGE call on each key, so we'll use a + // pool to round-robin these commands among multiple clients. A single client can scan all nodes in the cluster + // concurrently, so we use a single client rather than a pool to issue the SCAN calls. 
let mut total_size = 0; // if the pattern contains a hash tag then callers can use `scan` instead of `scan_cluster` let mut scanner = pool.next().scan_cluster("*", Some(100), None); @@ -81,7 +125,7 @@ async fn pool_scan_cluster_memory_example(pool: &RedisPool) -> Result<(), RedisE } } - let _ = page.next(); + page.next(); } println!("Total size: {}", total_size); diff --git a/examples/sentinel.rs b/examples/sentinel.rs index a5afc644..916c924b 100644 --- a/examples/sentinel.rs +++ b/examples/sentinel.rs @@ -1,11 +1,11 @@ #![allow(clippy::disallowed_names)] #![allow(clippy::let_underscore_future)] -use fred::{prelude::*, types::Server}; +use fred::{prelude::*, types::config::Server}; #[tokio::main] -async fn main() -> Result<(), RedisError> { - let config = RedisConfig { +async fn main() -> Result<(), Error> { + let config = Config { server: ServerConfig::Sentinel { // the name of the service, as configured in the sentinel configuration service_name: "my-service-name".into(), diff --git a/examples/serde_json.rs b/examples/serde_json.rs index 3e912ff3..03ac8418 100644 --- a/examples/serde_json.rs +++ b/examples/serde_json.rs @@ -14,8 +14,8 @@ struct Person { } #[tokio::main] -async fn main() -> Result<(), RedisError> { - let client = RedisClient::default(); +async fn main() -> Result<(), Error> { + let client = Client::default(); client.init().await?; let value = json!({ diff --git a/examples/streams.rs b/examples/streams.rs index fa986b50..d4574ed5 100644 --- a/examples/streams.rs +++ b/examples/streams.rs @@ -1,7 +1,7 @@ #![allow(clippy::mutable_key_type)] use bytes_utils::Str; -use fred::{prelude::*, types::XReadResponse}; +use fred::{prelude::*, types::streams::XReadResponse}; use futures::future::try_join_all; use std::time::Duration; use tokio::time::sleep; @@ -43,7 +43,7 @@ async fn main() { } client.quit().await?; - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); let writer_task = tokio::spawn(async move { @@ -72,7 +72,7 @@ async fn main() { } 
client.quit().await?; - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); try_join_all([writer_task, reader_task]).await.unwrap(); diff --git a/examples/tls.rs b/examples/tls.rs index ce99c2f3..38e7fe7b 100644 --- a/examples/tls.rs +++ b/examples/tls.rs @@ -8,7 +8,7 @@ use fred::prelude::*; feature = "enable-rustls", feature = "enable-rustls-ring" ))] -use fred::types::TlsConnector; +use fred::types::config::TlsConnector; #[cfg(feature = "enable-native-tls")] fn create_tls_config() -> TlsConnector { @@ -39,15 +39,15 @@ fn create_tls_config() -> TlsConnector { } #[tokio::main] -async fn main() -> Result<(), RedisError> { - let config = RedisConfig { +async fn main() -> Result<(), Error> { + let config = Config { #[cfg(any( feature = "enable-rustls", feature = "enable-native-tls", feature = "enable-rustls-ring" ))] tls: Some(create_tls_config().into()), - ..RedisConfig::default() + ..Config::default() }; let client = Builder::from_config(config).build()?; client.init().await?; diff --git a/examples/transactions.rs b/examples/transactions.rs index f46fec9d..490b2433 100644 --- a/examples/transactions.rs +++ b/examples/transactions.rs @@ -4,17 +4,17 @@ use fred::prelude::*; #[tokio::main] -async fn main() -> Result<(), RedisError> { - let client = RedisClient::default(); +async fn main() -> Result<(), Error> { + let client = Client::default(); client.init().await?; // transactions are buffered in memory before calling `exec` let trx = client.multi(); - let result: RedisValue = trx.get("foo").await?; + let result: Value = trx.get("foo").await?; assert!(result.is_queued()); - let result: RedisValue = trx.set("foo", "bar", None, None, false).await?; + let result: Value = trx.set("foo", "bar", None, None, false).await?; assert!(result.is_queued()); - let result: RedisValue = trx.get("foo").await?; + let result: Value = trx.get("foo").await?; assert!(result.is_queued()); let values: (Option, (), String) = trx.exec(true).await?; diff --git a/src/clients/redis.rs 
b/src/clients/client.rs similarity index 74% rename from src/clients/redis.rs rename to src/clients/client.rs index 89c14acc..42b80775 100644 --- a/src/clients/redis.rs +++ b/src/clients/client.rs @@ -1,155 +1,157 @@ +#[cfg(feature = "replicas")] +use crate::clients::Replicas; +#[cfg(feature = "i-tracking")] +use crate::interfaces::TrackingInterface; use crate::{ clients::{Pipeline, WithOptions}, commands, - error::{RedisError, RedisErrorKind}, + error::{Error, ErrorKind}, interfaces::*, - modules::inner::RedisClientInner, - prelude::ClientLike, + modules::inner::ClientInner, + prelude::{ClientLike, Config, ConnectionConfig, Options, PerformanceConfig, ReconnectPolicy, Server}, runtime::RefCount, - types::*, + types::{ + scan::{HScanResult, SScanResult, ScanResult, ScanType, ZScanResult}, + *, + }, }; use bytes_utils::Str; use futures::Stream; use std::{fmt, fmt::Formatter}; -#[cfg(feature = "replicas")] -use crate::clients::Replicas; -#[cfg(feature = "i-tracking")] -use crate::interfaces::TrackingInterface; - -/// A cheaply cloneable Redis client struct. +/// A cheaply cloneable client struct. 
#[derive(Clone)] -pub struct RedisClient { - pub(crate) inner: RefCount, +pub struct Client { + pub(crate) inner: RefCount, } -impl Default for RedisClient { +impl Default for Client { fn default() -> Self { - RedisClient::new(RedisConfig::default(), None, None, None) + Client::new(Config::default(), None, None, None) } } -impl fmt::Debug for RedisClient { +impl fmt::Debug for Client { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RedisClient") + f.debug_struct("Client") .field("id", &self.inner.id) .field("state", &self.state()) .finish() } } -impl fmt::Display for RedisClient { +impl fmt::Display for Client { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", self.inner.id) } } #[doc(hidden)] -impl<'a> From<&'a RefCount> for RedisClient { - fn from(inner: &'a RefCount) -> RedisClient { - RedisClient { inner: inner.clone() } +impl<'a> From<&'a RefCount> for Client { + fn from(inner: &'a RefCount) -> Client { + Client { inner: inner.clone() } } } -impl ClientLike for RedisClient { +impl ClientLike for Client { #[doc(hidden)] - fn inner(&self) -> &RefCount { + fn inner(&self) -> &RefCount { &self.inner } } -impl EventInterface for RedisClient {} +impl EventInterface for Client {} #[cfg(feature = "i-redis-json")] #[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] -impl RedisJsonInterface for RedisClient {} +impl RedisJsonInterface for Client {} #[cfg(feature = "i-time-series")] #[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] -impl TimeSeriesInterface for RedisClient {} +impl TimeSeriesInterface for Client {} #[cfg(feature = "i-acl")] #[cfg_attr(docsrs, doc(cfg(feature = "i-acl")))] -impl AclInterface for RedisClient {} +impl AclInterface for Client {} #[cfg(feature = "i-client")] #[cfg_attr(docsrs, doc(cfg(feature = "i-client")))] -impl ClientInterface for RedisClient {} +impl ClientInterface for Client {} #[cfg(feature = "i-cluster")] #[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] -impl 
ClusterInterface for RedisClient {} +impl ClusterInterface for Client {} #[cfg(feature = "i-config")] #[cfg_attr(docsrs, doc(cfg(feature = "i-config")))] -impl ConfigInterface for RedisClient {} +impl ConfigInterface for Client {} #[cfg(feature = "i-geo")] #[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] -impl GeoInterface for RedisClient {} +impl GeoInterface for Client {} #[cfg(feature = "i-hashes")] #[cfg_attr(docsrs, doc(cfg(feature = "i-hashes")))] -impl HashesInterface for RedisClient {} +impl HashesInterface for Client {} #[cfg(feature = "i-hyperloglog")] #[cfg_attr(docsrs, doc(cfg(feature = "i-hyperloglog")))] -impl HyperloglogInterface for RedisClient {} -impl MetricsInterface for RedisClient {} +impl HyperloglogInterface for Client {} +impl MetricsInterface for Client {} #[cfg(feature = "transactions")] #[cfg_attr(docsrs, doc(cfg(feature = "transactions")))] -impl TransactionInterface for RedisClient {} +impl TransactionInterface for Client {} #[cfg(feature = "i-keys")] #[cfg_attr(docsrs, doc(cfg(feature = "i-keys")))] -impl KeysInterface for RedisClient {} +impl KeysInterface for Client {} #[cfg(feature = "i-scripts")] #[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] -impl LuaInterface for RedisClient {} +impl LuaInterface for Client {} #[cfg(feature = "i-lists")] #[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] -impl ListInterface for RedisClient {} +impl ListInterface for Client {} #[cfg(feature = "i-memory")] #[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] -impl MemoryInterface for RedisClient {} -impl AuthInterface for RedisClient {} +impl MemoryInterface for Client {} +impl AuthInterface for Client {} #[cfg(feature = "i-server")] #[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] -impl ServerInterface for RedisClient {} +impl ServerInterface for Client {} #[cfg(feature = "i-slowlog")] #[cfg_attr(docsrs, doc(cfg(feature = "i-slowlog")))] -impl SlowlogInterface for RedisClient {} +impl SlowlogInterface for Client {} #[cfg(feature = 
"i-sets")] #[cfg_attr(docsrs, doc(cfg(feature = "i-sets")))] -impl SetsInterface for RedisClient {} +impl SetsInterface for Client {} #[cfg(feature = "i-sorted-sets")] #[cfg_attr(docsrs, doc(cfg(feature = "i-sorted-sets")))] -impl SortedSetsInterface for RedisClient {} +impl SortedSetsInterface for Client {} #[cfg(feature = "i-server")] #[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] -impl HeartbeatInterface for RedisClient {} +impl HeartbeatInterface for Client {} #[cfg(feature = "i-streams")] #[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] -impl StreamsInterface for RedisClient {} +impl StreamsInterface for Client {} #[cfg(feature = "i-scripts")] #[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] -impl FunctionInterface for RedisClient {} +impl FunctionInterface for Client {} #[cfg(feature = "i-tracking")] #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] -impl TrackingInterface for RedisClient {} +impl TrackingInterface for Client {} #[cfg(feature = "i-pubsub")] #[cfg_attr(docsrs, doc(cfg(feature = "i-pubsub")))] -impl PubsubInterface for RedisClient {} +impl PubsubInterface for Client {} #[cfg(feature = "i-redisearch")] #[cfg_attr(docsrs, doc(cfg(feature = "i-redisearch")))] -impl RediSearchInterface for RedisClient {} +impl RediSearchInterface for Client {} -impl RedisClient { +impl Client { /// Create a new client instance without connecting to the server. /// /// See the [builder](crate::types::Builder) interface for more information. pub fn new( - config: RedisConfig, + config: Config, perf: Option, connection: Option, policy: Option, - ) -> RedisClient { - RedisClient { - inner: RedisClientInner::new(config, perf.unwrap_or_default(), connection.unwrap_or_default(), policy), + ) -> Client { + Client { + inner: ClientInner::new(config, perf.unwrap_or_default(), connection.unwrap_or_default(), policy), } } - /// Create a new `RedisClient` from the config provided to this client. 
+ /// Create a new `Client` from the config provided to this client. /// /// The returned client will **not** be connected to the server. pub fn clone_new(&self) -> Self { @@ -158,7 +160,7 @@ impl RedisClient { policy.reset_attempts(); } - RedisClient::new( + Client::new( self.inner.config.as_ref().clone(), Some(self.inner.performance_config()), Some(self.inner.connection_config()), @@ -166,19 +168,19 @@ impl RedisClient { ) } - /// Split a clustered Redis client into a set of centralized clients - one for each primary node in the cluster. + /// Split a clustered client into a set of centralized clients - one for each primary node in the cluster. /// - /// Alternatively, callers can use [with_cluster_node](crate::clients::RedisClient::with_cluster_node) to avoid + /// Alternatively, callers can use [with_cluster_node](crate::clients::Client::with_cluster_node) to avoid /// creating new connections. /// /// The clients returned by this function will not be connected to their associated servers. The caller needs to /// call `connect` on each client before sending any commands. - pub fn split_cluster(&self) -> Result, RedisError> { + pub fn split_cluster(&self) -> Result, Error> { if self.inner.config.server.is_clustered() { commands::server::split(&self.inner) } else { - Err(RedisError::new( - RedisErrorKind::Unknown, + Err(Error::new( + ErrorKind::Unknown, "Client is not using a clustered deployment.", )) } @@ -200,11 +202,11 @@ impl RedisClient { pattern: P, count: Option, r#type: Option, - ) -> impl Stream> + ) -> impl Stream> where P: Into, { - commands::scan::scan(&self.inner, pattern.into(), count, r#type, None) + commands::scan::scan(&self.inner, pattern.into(), count, r#type) } /// Scan the keys in the keyspace, buffering all results in memory as quickly as the server returns them. 
@@ -222,7 +224,7 @@ impl RedisClient { pattern: P, count: Option, r#type: Option, - ) -> impl Stream> + ) -> impl Stream> where P: Into, { @@ -237,7 +239,7 @@ impl RedisClient { /// state changes. /// /// Unlike `SCAN`, `HSCAN`, etc, the returned stream may continue even if - /// [has_more](crate::types::ScanResult::has_more) returns false on a given page of keys. + /// [has_more](crate::types::scan::Scanner::has_more) returns false on a given page of keys. /// /// See [scan_buffered](Self::scan_buffered) or [scan_cluster_buffered](Self::scan_cluster_buffered) for /// alternatives that automatically continue scanning in the background. @@ -246,7 +248,7 @@ impl RedisClient { pattern: P, count: Option, r#type: Option, - ) -> impl Stream> + ) -> impl Stream> where P: Into, { @@ -269,7 +271,7 @@ impl RedisClient { pattern: P, count: Option, r#type: Option, - ) -> impl Stream> + ) -> impl Stream> where P: Into, { @@ -280,14 +282,9 @@ impl RedisClient { /// specified. /// /// - pub fn hscan( - &self, - key: K, - pattern: P, - count: Option, - ) -> impl Stream> + pub fn hscan(&self, key: K, pattern: P, count: Option) -> impl Stream> where - K: Into, + K: Into, P: Into, { commands::scan::hscan(&self.inner, key.into(), pattern.into(), count) @@ -296,14 +293,9 @@ impl RedisClient { /// Incrementally iterate over pages of the set stored at `key`, returning `count` results per page, if specified. /// /// - pub fn sscan( - &self, - key: K, - pattern: P, - count: Option, - ) -> impl Stream> + pub fn sscan(&self, key: K, pattern: P, count: Option) -> impl Stream> where - K: Into, + K: Into, P: Into, { commands::scan::sscan(&self.inner, key.into(), pattern.into(), count) @@ -313,21 +305,16 @@ impl RedisClient { /// specified. 
/// /// - pub fn zscan( - &self, - key: K, - pattern: P, - count: Option, - ) -> impl Stream> + pub fn zscan(&self, key: K, pattern: P, count: Option) -> impl Stream> where - K: Into, + K: Into, P: Into, { commands::scan::zscan(&self.inner, key.into(), pattern.into(), count) } /// Send a series of commands in a [pipeline](https://redis.io/docs/manual/pipelining/). - pub fn pipeline(&self) -> Pipeline { + pub fn pipeline(&self) -> Pipeline { Pipeline::from(self.clone()) } @@ -337,13 +324,13 @@ impl RedisClient { /// /// ```rust /// # use fred::prelude::*; - /// async fn example(client: &RedisClient) -> Result<(), RedisError> { - /// // discover servers via the `RedisConfig` or active connections + /// async fn example(client: &Client) -> Result<(), Error> { + /// // discover servers via the `Config` or active connections /// let connections = client.active_connections().await?; /// /// // ping each node in the cluster individually /// for server in connections.into_iter() { - /// let _: () = client.with_cluster_node(server).ping().await?; + /// let _: () = client.with_cluster_node(server).ping(None).await?; /// } /// /// // or use the cached cluster routing table to discover servers @@ -391,7 +378,7 @@ impl RedisClient { /// Create a client that interacts with replica nodes. 
#[cfg(feature = "replicas")] #[cfg_attr(docsrs, doc(cfg(feature = "replicas")))] - pub fn replicas(&self) -> Replicas { + pub fn replicas(&self) -> Replicas { Replicas::from(&self.inner) } } diff --git a/src/clients/mod.rs b/src/clients/mod.rs index 2906a02e..f14a8693 100644 --- a/src/clients/mod.rs +++ b/src/clients/mod.rs @@ -1,12 +1,12 @@ +mod client; mod options; mod pipeline; mod pool; -mod redis; +pub use client::Client; pub use options::WithOptions; pub use pipeline::Pipeline; -pub use pool::RedisPool; -pub use redis::RedisClient; +pub use pool::Pool; #[cfg(not(feature = "glommio"))] pub use pool::ExclusivePool; diff --git a/src/clients/options.rs b/src/clients/options.rs index 19a89fed..36de905d 100644 --- a/src/clients/options.rs +++ b/src/clients/options.rs @@ -1,22 +1,25 @@ use crate::{ - error::RedisError, + error::Error, interfaces::*, - modules::inner::RedisClientInner, - protocol::command::RedisCommand, + modules::inner::ClientInner, + protocol::command::Command, runtime::RefCount, - types::Options, + types::config::Options, }; use std::{fmt, ops::Deref}; +#[cfg(feature = "replicas")] +use crate::clients::Replicas; + /// A client interface used to customize command configuration options. /// -/// See [Options](crate::types::Options) for more information. +/// See [Options](crate::types::config::Options) for more information. /// /// ```rust /// # use fred::prelude::*; /// # use std::time::Duration; -/// async fn example() -> Result<(), RedisError> { -/// let client = RedisClient::default(); +/// async fn example() -> Result<(), Error> { +/// let client = Client::default(); /// client.init().await?; /// /// let options = Options { @@ -53,6 +56,13 @@ impl WithOptions { pub fn options(&self) -> &Options { &self.options } + + /// Create a client that interacts with replica nodes. 
+ #[cfg(feature = "replicas")] + #[cfg_attr(docsrs, doc(cfg(feature = "replicas")))] + pub fn replicas(&self) -> Replicas> { + Replicas { client: self.clone() } + } } impl Deref for WithOptions { @@ -74,22 +84,22 @@ impl fmt::Debug for WithOptions { impl ClientLike for WithOptions { #[doc(hidden)] - fn inner(&self) -> &RefCount { + fn inner(&self) -> &RefCount { self.client.inner() } #[doc(hidden)] - fn change_command(&self, command: &mut RedisCommand) { + fn change_command(&self, command: &mut Command) { self.client.change_command(command); self.options.apply(command); } #[doc(hidden)] - fn send_command(&self, command: T) -> Result<(), RedisError> + fn send_command(&self, command: T) -> Result<(), Error> where - T: Into, + T: Into, { - let mut command: RedisCommand = command.into(); + let mut command: Command = command.into(); self.options.apply(&mut command); self.client.send_command(command) } diff --git a/src/clients/pipeline.rs b/src/clients/pipeline.rs index eaa6b34a..42137139 100644 --- a/src/clients/pipeline.rs +++ b/src/clients/pipeline.rs @@ -1,10 +1,10 @@ use crate::{ - error::RedisError, + error::Error, interfaces::{self, *}, - modules::{inner::RedisClientInner, response::FromRedis}, - prelude::{RedisResult, RedisValue}, + modules::{inner::ClientInner, response::FromValue}, + prelude::{FredResult, Value}, protocol::{ - command::{RedisCommand, RouterCommand}, + command::{Command, RouterCommand}, responders::ResponseKind, utils as protocol_utils, }, @@ -13,14 +13,14 @@ use crate::{ }; use std::{collections::VecDeque, fmt, fmt::Formatter}; -fn clone_buffered_commands(buffer: &Mutex>) -> VecDeque { +fn clone_buffered_commands(buffer: &Mutex>) -> VecDeque { buffer.lock().iter().map(|c| c.duplicate(ResponseKind::Skip)).collect() } fn prepare_all_commands( - commands: VecDeque, + commands: VecDeque, error_early: bool, -) -> (RouterCommand, OneshotReceiver>) { +) -> (RouterCommand, OneshotReceiver>) { let (tx, rx) = oneshot_channel(); let expected_responses = 
commands .iter() @@ -29,7 +29,7 @@ fn prepare_all_commands( let mut response = ResponseKind::new_buffer_with_size(expected_responses, tx); response.set_error_early(error_early); - let commands: Vec = commands + let commands: Vec = commands .into_iter() .enumerate() .map(|(idx, mut cmd)| { @@ -47,7 +47,7 @@ fn prepare_all_commands( /// /// See the [all](Self::all), [last](Self::last), and [try_all](Self::try_all) functions for more information. pub struct Pipeline { - commands: RefCount>>, + commands: RefCount>>, client: C, } @@ -82,22 +82,22 @@ impl From for Pipeline { impl ClientLike for Pipeline { #[doc(hidden)] - fn inner(&self) -> &RefCount { + fn inner(&self) -> &RefCount { self.client.inner() } #[doc(hidden)] - fn change_command(&self, command: &mut RedisCommand) { + fn change_command(&self, command: &mut Command) { self.client.change_command(command); } #[doc(hidden)] #[allow(unused_mut)] - fn send_command(&self, command: T) -> Result<(), RedisError> + fn send_command(&self, command: T) -> Result<(), Error> where - T: Into, + T: Into, { - let mut command: RedisCommand = command.into(); + let mut command: Command = command.into(); self.change_command(&mut command); if let Some(mut tx) = command.take_responder() { @@ -183,7 +183,7 @@ impl Pipeline { /// /// ```rust no_run /// # use fred::prelude::*; - /// async fn example(client: &RedisClient) -> Result<(), RedisError> { + /// async fn example(client: &Client) -> Result<(), Error> { /// let _ = client.mset(vec![("foo", 1), ("bar", 2)]).await?; /// /// let pipeline = client.pipeline(); @@ -195,9 +195,9 @@ impl Pipeline { /// Ok(()) /// } /// ``` - pub async fn all(&self) -> Result + pub async fn all(&self) -> Result where - R: FromRedis, + R: FromValue, { let commands = clone_buffered_commands(&self.commands); send_all(self.client.inner(), commands).await?.convert() @@ -205,28 +205,28 @@ impl Pipeline { /// Send the pipeline and respond with each individual result. 
/// - /// Note: use `RedisValue` as the return type (and [convert](crate::types::RedisValue::convert) as needed) to + /// Note: use `Value` as the return type (and [convert](crate::types::Value::convert) as needed) to /// support an array of different return types. /// /// ```rust no_run /// # use fred::prelude::*; - /// async fn example(client: &RedisClient) -> Result<(), RedisError> { + /// async fn example(client: &Client) -> Result<(), Error> { /// let _ = client.mset(vec![("foo", 1), ("bar", 2)]).await?; /// /// let pipeline = client.pipeline(); /// let _: () = pipeline.get("foo").await?; /// let _: () = pipeline.hgetall("bar").await?; // this will error since `bar` is an integer /// - /// let results = pipeline.try_all::().await; + /// let results = pipeline.try_all::().await; /// assert_eq!(results[0].clone()?.convert::()?, 1); /// assert!(results[1].is_err()); /// /// Ok(()) /// } /// ``` - pub async fn try_all(&self) -> Vec> + pub async fn try_all(&self) -> Vec> where - R: FromRedis, + R: FromValue, { let commands = clone_buffered_commands(&self.commands); try_send_all(self.client.inner(), commands) @@ -240,7 +240,7 @@ impl Pipeline { /// /// ```rust no_run /// # use fred::prelude::*; - /// async fn example(client: &RedisClient) -> Result<(), RedisError> { + /// async fn example(client: &Client) -> Result<(), Error> { /// let pipeline = client.pipeline(); /// let _: () = pipeline.incr("foo").await?; // returns when the command is queued in memory /// let _: () = pipeline.incr("foo").await?; // returns when the command is queued in memory @@ -251,19 +251,16 @@ impl Pipeline { /// Ok(()) /// } /// ``` - pub async fn last(&self) -> Result + pub async fn last(&self) -> Result where - R: FromRedis, + R: FromValue, { let commands = clone_buffered_commands(&self.commands); send_last(self.client.inner(), commands).await?.convert() } } -async fn try_send_all( - inner: &RefCount, - commands: VecDeque, -) -> Vec> { +async fn try_send_all(inner: &RefCount, commands: 
VecDeque) -> Vec> { if commands.is_empty() { return Vec::new(); } @@ -290,12 +287,9 @@ async fn try_send_all( } } -async fn send_all( - inner: &RefCount, - commands: VecDeque, -) -> Result { +async fn send_all(inner: &RefCount, commands: VecDeque) -> Result { if commands.is_empty() { - return Ok(RedisValue::Array(Vec::new())); + return Ok(Value::Array(Vec::new())); } let (mut command, rx) = prepare_all_commands(commands, true); @@ -307,17 +301,14 @@ async fn send_all( protocol_utils::frame_to_results(frame) } -async fn send_last( - inner: &RefCount, - commands: VecDeque, -) -> Result { +async fn send_last(inner: &RefCount, commands: VecDeque) -> Result { if commands.is_empty() { - return Ok(RedisValue::Null); + return Ok(Value::Null); } let len = commands.len(); let (tx, rx) = oneshot_channel(); - let mut commands: Vec = commands.into_iter().collect(); + let mut commands: Vec = commands.into_iter().collect(); commands[len - 1].response = ResponseKind::Respond(Some(tx)); let mut command = RouterCommand::Pipeline { commands }; command.inherit_options(inner); diff --git a/src/clients/pool.rs b/src/clients/pool.rs index f427e5eb..1f82340c 100644 --- a/src/clients/pool.rs +++ b/src/clients/pool.rs @@ -1,10 +1,13 @@ use crate::{ - clients::RedisClient, - error::{RedisError, RedisErrorKind}, + clients::Client, + error::{Error, ErrorKind}, interfaces::*, - modules::inner::RedisClientInner, + modules::inner::ClientInner, runtime::{sleep, spawn, AtomicBool, AtomicUsize, RefCount}, - types::{ConnectHandle, ConnectionConfig, PerformanceConfig, ReconnectPolicy, RedisConfig, Server}, + types::{ + config::{Config, ConnectionConfig, PerformanceConfig, ReconnectPolicy, Server}, + ConnectHandle, + }, utils, }; use fred_macros::rm_send_if; @@ -18,8 +21,8 @@ use crate::protocol::types::Resolve; #[cfg(not(feature = "glommio"))] pub use tokio::sync::{Mutex as AsyncMutex, OwnedMutexGuard}; -struct RedisPoolInner { - clients: Vec, +struct PoolInner { + clients: Vec, counter: AtomicUsize, 
prefer_connected: AtomicBool, } @@ -28,7 +31,7 @@ struct RedisPoolInner { /// /// ### Restrictions /// -/// The following interfaces are not implemented on `RedisPool`: +/// The following interfaces are not implemented on `Pool`: /// * [MetricsInterface](crate::interfaces::MetricsInterface) /// * [PubsubInterface](crate::interfaces::PubsubInterface) /// * [EventInterface](crate::interfaces::EventInterface) @@ -37,14 +40,14 @@ struct RedisPoolInner { /// /// In many cases, such as [publish](crate::interfaces::PubsubInterface::publish), callers can work around this by /// adding a call to [next](Self::next), but in some scenarios this may not work. As a general rule, any commands -/// that change or depend on local connection state will not be implemented directly on `RedisPool`. Callers can use +/// that change or depend on local connection state will not be implemented directly on `Pool`. Callers can use /// [clients](Self::clients), [next](Self::next), or [last](Self::last) to operate on individual clients if needed. #[derive(Clone)] -pub struct RedisPool { - inner: RefCount, +pub struct Pool { + inner: RefCount, } -impl fmt::Debug for RedisPool { +impl fmt::Debug for Pool { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("RedisPool") .field("size", &self.inner.clients.len()) @@ -56,14 +59,14 @@ impl fmt::Debug for RedisPool { } } -impl RedisPool { +impl Pool { /// Create a new pool from an existing set of clients. 
- pub fn from_clients(clients: Vec) -> Result { + pub fn from_clients(clients: Vec) -> Result { if clients.is_empty() { - Err(RedisError::new(RedisErrorKind::Config, "Pool cannot be empty.")) + Err(Error::new(ErrorKind::Config, "Pool cannot be empty.")) } else { - Ok(RedisPool { - inner: RefCount::new(RedisPoolInner { + Ok(Pool { + inner: RefCount::new(PoolInner { clients, counter: AtomicUsize::new(0), prefer_connected: AtomicBool::new(true), @@ -76,18 +79,18 @@ impl RedisPool { /// /// See the [builder](crate::types::Builder) interface for more information. pub fn new( - config: RedisConfig, + config: Config, perf: Option, connection: Option, policy: Option, size: usize, - ) -> Result { + ) -> Result { if size == 0 { - Err(RedisError::new(RedisErrorKind::Config, "Pool cannot be empty.")) + Err(Error::new(ErrorKind::Config, "Pool cannot be empty.")) } else { let mut clients = Vec::with_capacity(size); for _ in 0 .. size { - clients.push(RedisClient::new( + clients.push(Client::new( config.clone(), perf.clone(), connection.clone(), @@ -95,8 +98,8 @@ impl RedisPool { )); } - Ok(RedisPool { - inner: RefCount::new(RedisPoolInner { + Ok(Pool { + inner: RefCount::new(PoolInner { clients, counter: AtomicUsize::new(0), prefer_connected: AtomicBool::new(true), @@ -112,7 +115,7 @@ impl RedisPool { } /// Read the individual clients in the pool. - pub fn clients(&self) -> &[RedisClient] { + pub fn clients(&self) -> &[Client] { &self.inner.clients } @@ -129,7 +132,7 @@ impl RedisPool { } /// Read the next connected client that should run the next command. - pub fn next_connected(&self) -> &RedisClient { + pub fn next_connected(&self) -> &Client { let mut idx = utils::incr_atomic(&self.inner.counter) % self.inner.clients.len(); for _ in 0 .. self.inner.clients.len() { @@ -144,27 +147,27 @@ impl RedisPool { } /// Read the client that should run the next command. 
- pub fn next(&self) -> &RedisClient { + pub fn next(&self) -> &Client { &self.inner.clients[utils::incr_atomic(&self.inner.counter) % self.inner.clients.len()] } /// Read the client that ran the last command. - pub fn last(&self) -> &RedisClient { + pub fn last(&self) -> &Client { &self.inner.clients[utils::read_atomic(&self.inner.counter) % self.inner.clients.len()] } /// Create a client that interacts with the replica nodes associated with the [next](Self::next) client. #[cfg(feature = "replicas")] #[cfg_attr(docsrs, doc(cfg(feature = "replicas")))] - pub fn replicas(&self) -> Replicas { + pub fn replicas(&self) -> Replicas { Replicas::from(self.inner()) } } #[rm_send_if(feature = "glommio")] -impl ClientLike for RedisPool { +impl ClientLike for Pool { #[doc(hidden)] - fn inner(&self) -> &RefCount { + fn inner(&self) -> &RefCount { if utils::read_bool_atomic(&self.inner.prefer_connected) { &self.next_connected().inner } else { @@ -172,8 +175,8 @@ impl ClientLike for RedisPool { } } - /// Update the internal [PerformanceConfig](crate::types::PerformanceConfig) on each client in place with new - /// values. + /// Update the internal [PerformanceConfig](crate::types::config::PerformanceConfig) on each client in place with + /// new values. fn update_perf_config(&self, config: PerformanceConfig) { for client in self.inner.clients.iter() { client.update_perf_config(config.clone()); @@ -181,21 +184,10 @@ impl ClientLike for RedisPool { } /// Read the set of active connections across all clients in the pool. 
- fn active_connections(&self) -> impl Future, RedisError>> + Send { - async move { - let all_connections = try_join_all(self.inner.clients.iter().map(|c| c.active_connections())).await?; - let total_size = if all_connections.is_empty() { - return Ok(Vec::new()); - } else { - all_connections.len() * all_connections[0].len() - }; - let mut out = Vec::with_capacity(total_size); - - for connections in all_connections.into_iter() { - out.extend(connections); - } - Ok(out) - } + /// + /// This may contain duplicates when separate clients are connected to the same server. + fn active_connections(&self) -> Vec { + self.inner.clients.iter().flat_map(|c| c.active_connections()).collect() } /// Override the DNS resolution logic for all clients in the pool. @@ -214,7 +206,7 @@ impl ClientLike for RedisPool { /// /// This function returns a `JoinHandle` to a task that drives **all** connections via [join](https://docs.rs/futures/latest/futures/macro.join.html). /// - /// See [connect_pool](crate::clients::RedisPool::connect_pool) for a variation of this function that separates the + /// See [connect_pool](crate::clients::Pool::connect_pool) for a variation of this function that separates the /// connection tasks. /// /// See [init](Self::init) for an alternative shorthand. @@ -226,14 +218,14 @@ impl ClientLike for RedisPool { result??; } - Ok::<(), RedisError>(()) + Ok::<(), Error>(()) }) } /// Force a reconnection to the server(s) for each client. /// /// When running against a cluster this function will also refresh the cached cluster routing table. - fn force_reconnection(&self) -> impl Future> + Send { + fn force_reconnection(&self) -> impl Future> + Send { async move { try_join_all(self.inner.clients.iter().map(|c| c.force_reconnection())).await?; Ok(()) @@ -241,7 +233,7 @@ impl ClientLike for RedisPool { } /// Wait for all the clients to connect to the server. 
- fn wait_for_connect(&self) -> impl Future> + Send { + fn wait_for_connect(&self) -> impl Future> + Send { async move { try_join_all(self.inner.clients.iter().map(|c| c.wait_for_connect())).await?; Ok(()) @@ -260,7 +252,7 @@ impl ClientLike for RedisPool { /// use fred::prelude::*; /// /// #[tokio::main] - /// async fn main() -> Result<(), RedisError> { + /// async fn main() -> Result<(), Error> { /// let pool = Builder::default_centralized().build_pool(5)?; /// let connection_task = pool.init().await?; /// @@ -270,7 +262,7 @@ impl ClientLike for RedisPool { /// connection_task.await? /// } /// ``` - fn init(&self) -> impl Future> + Send { + fn init(&self) -> impl Future> + Send { #[allow(unused_mut)] async move { let mut rxs: Vec<_> = self @@ -309,7 +301,7 @@ impl ClientLike for RedisPool { /// /// This function will also close all error, pubsub message, and reconnection event streams on all clients in the /// pool. - fn quit(&self) -> impl Future> + Send { + fn quit(&self) -> impl Future> + Send { async move { join_all(self.inner.clients.iter().map(|c| c.quit())).await; @@ -319,17 +311,17 @@ impl ClientLike for RedisPool { } #[rm_send_if(feature = "glommio")] -impl HeartbeatInterface for RedisPool { +impl HeartbeatInterface for Pool { fn enable_heartbeat( &self, interval: Duration, break_on_error: bool, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { loop { sleep(interval).await; - if let Err(error) = try_join_all(self.inner.clients.iter().map(|c| c.ping::<()>())).await { + if let Err(error) = try_join_all(self.inner.clients.iter().map(|c| c.ping::<()>(None))).await { if break_on_error { return Err(error); } @@ -341,71 +333,71 @@ impl HeartbeatInterface for RedisPool { #[cfg(feature = "i-acl")] #[cfg_attr(docsrs, doc(cfg(feature = "i-acl")))] -impl AclInterface for RedisPool {} +impl AclInterface for Pool {} #[cfg(feature = "i-client")] #[cfg_attr(docsrs, doc(cfg(feature = "i-client")))] -impl ClientInterface for RedisPool {} +impl 
ClientInterface for Pool {} #[cfg(feature = "i-cluster")] #[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] -impl ClusterInterface for RedisPool {} +impl ClusterInterface for Pool {} #[cfg(feature = "i-config")] #[cfg_attr(docsrs, doc(cfg(feature = "i-config")))] -impl ConfigInterface for RedisPool {} +impl ConfigInterface for Pool {} #[cfg(feature = "i-geo")] #[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] -impl GeoInterface for RedisPool {} +impl GeoInterface for Pool {} #[cfg(feature = "i-hashes")] #[cfg_attr(docsrs, doc(cfg(feature = "i-hashes")))] -impl HashesInterface for RedisPool {} +impl HashesInterface for Pool {} #[cfg(feature = "i-hyperloglog")] #[cfg_attr(docsrs, doc(cfg(feature = "i-hyperloglog")))] -impl HyperloglogInterface for RedisPool {} +impl HyperloglogInterface for Pool {} #[cfg(feature = "transactions")] #[cfg_attr(docsrs, doc(cfg(feature = "transactions")))] -impl TransactionInterface for RedisPool {} +impl TransactionInterface for Pool {} #[cfg(feature = "i-keys")] #[cfg_attr(docsrs, doc(cfg(feature = "i-keys")))] -impl KeysInterface for RedisPool {} +impl KeysInterface for Pool {} #[cfg(feature = "i-scripts")] #[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] -impl LuaInterface for RedisPool {} +impl LuaInterface for Pool {} #[cfg(feature = "i-lists")] #[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] -impl ListInterface for RedisPool {} +impl ListInterface for Pool {} #[cfg(feature = "i-memory")] #[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] -impl MemoryInterface for RedisPool {} +impl MemoryInterface for Pool {} #[cfg(feature = "i-server")] #[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] -impl ServerInterface for RedisPool {} +impl ServerInterface for Pool {} #[cfg(feature = "i-slowlog")] #[cfg_attr(docsrs, doc(cfg(feature = "i-slowlog")))] -impl SlowlogInterface for RedisPool {} +impl SlowlogInterface for Pool {} #[cfg(feature = "i-sets")] #[cfg_attr(docsrs, doc(cfg(feature = "i-sets")))] -impl SetsInterface for 
RedisPool {} +impl SetsInterface for Pool {} #[cfg(feature = "i-sorted-sets")] #[cfg_attr(docsrs, doc(cfg(feature = "i-sorted-sets")))] -impl SortedSetsInterface for RedisPool {} +impl SortedSetsInterface for Pool {} #[cfg(feature = "i-streams")] #[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] -impl StreamsInterface for RedisPool {} +impl StreamsInterface for Pool {} #[cfg(feature = "i-scripts")] #[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] -impl FunctionInterface for RedisPool {} +impl FunctionInterface for Pool {} #[cfg(feature = "i-redis-json")] #[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] -impl RedisJsonInterface for RedisPool {} +impl RedisJsonInterface for Pool {} #[cfg(feature = "i-time-series")] #[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] -impl TimeSeriesInterface for RedisPool {} +impl TimeSeriesInterface for Pool {} #[cfg(feature = "i-redisearch")] #[cfg_attr(docsrs, doc(cfg(feature = "i-redisearch")))] -impl RediSearchInterface for RedisPool {} +impl RediSearchInterface for Pool {} #[cfg(not(feature = "glommio"))] -struct PoolInner { - clients: Vec>>, +struct ExclusivePoolInner { + clients: Vec>>, counter: AtomicUsize, } @@ -424,17 +416,17 @@ struct PoolInner { /// EXEC /// ``` /// -/// Unlike [RedisPool](crate::clients::RedisPool), this pooling interface does not directly implement +/// Unlike [RedisPool](crate::clients::Pool), this pooling interface does not directly implement /// [ClientLike](crate::interfaces::ClientLike). Callers acquire and release clients via the returned /// [MutexGuard](OwnedMutexGuard). 
/// /// ```rust /// use fred::{ -/// clients::{ExclusivePool, RedisPool}, +/// clients::{ExclusivePool, Pool}, /// prelude::*, /// }; /// -/// async fn example() -> Result<(), RedisError> { +/// async fn example() -> Result<(), Error> { /// let builder = Builder::default_centralized(); /// let shared_pool = builder.build_pool(5)?; /// let exclusive_pool = builder.build_exclusive_pool(5)?; @@ -469,7 +461,7 @@ struct PoolInner { #[cfg(not(feature = "glommio"))] #[derive(Clone)] pub struct ExclusivePool { - inner: RefCount, + inner: RefCount, } #[cfg(not(feature = "glommio"))] @@ -487,18 +479,18 @@ impl ExclusivePool { /// /// See the [builder](crate::types::Builder) interface for more information. pub fn new( - config: RedisConfig, + config: Config, perf: Option, connection: Option, policy: Option, size: usize, - ) -> Result { + ) -> Result { if size == 0 { - Err(RedisError::new(RedisErrorKind::Config, "Pool cannot be empty.")) + Err(Error::new(ErrorKind::Config, "Pool cannot be empty.")) } else { let mut clients = Vec::with_capacity(size); for _ in 0 .. size { - clients.push(RefCount::new(AsyncMutex::new(RedisClient::new( + clients.push(RefCount::new(AsyncMutex::new(Client::new( config.clone(), perf.clone(), connection.clone(), @@ -507,7 +499,7 @@ impl ExclusivePool { } Ok(ExclusivePool { - inner: RefCount::new(PoolInner { + inner: RefCount::new(ExclusivePoolInner { clients, counter: AtomicUsize::new(0), }), @@ -516,7 +508,7 @@ impl ExclusivePool { } /// Read the clients in the pool. - pub fn clients(&self) -> &[RefCount>] { + pub fn clients(&self) -> &[RefCount>] { &self.inner.clients } @@ -535,7 +527,7 @@ impl ExclusivePool { /// /// This function returns a `JoinHandle` to a task that drives **all** connections via [join](https://docs.rs/futures/latest/futures/macro.join.html). 
/// - /// See [connect_pool](crate::clients::RedisPool::connect_pool) for a variation of this function that separates the + /// See [connect_pool](crate::clients::Pool::connect_pool) for a variation of this function that separates the /// connection tasks. /// /// See [init](Self::init) for an alternative shorthand. @@ -553,7 +545,7 @@ impl ExclusivePool { /// Force a reconnection to the server(s) for each client. /// /// When running against a cluster this function will also refresh the cached cluster routing table. - pub async fn force_reconnection(&self) -> RedisResult<()> { + pub async fn force_reconnection(&self) -> FredResult<()> { let mut fts = Vec::with_capacity(self.inner.clients.len()); for locked_client in self.inner.clients.iter() { let client = locked_client.clone(); @@ -565,7 +557,7 @@ impl ExclusivePool { } /// Wait for all the clients to connect to the server. - pub async fn wait_for_connect(&self) -> RedisResult<()> { + pub async fn wait_for_connect(&self) -> FredResult<()> { let mut fts = Vec::with_capacity(self.inner.clients.len()); for locked_client in self.inner.clients.iter() { let client = locked_client.clone(); @@ -588,7 +580,7 @@ impl ExclusivePool { /// use fred::prelude::*; /// /// #[tokio::main] - /// async fn main() -> Result<(), RedisError> { + /// async fn main() -> Result<(), Error> { /// let pool = Builder::default_centralized().build_exclusive_pool(5)?; /// let connection_task = pool.init().await?; /// @@ -598,7 +590,7 @@ impl ExclusivePool { /// connection_task.await? /// } /// ``` - pub async fn init(&self) -> RedisResult { + pub async fn init(&self) -> FredResult { let mut rxs = Vec::with_capacity(self.inner.clients.len()); for locked_client in self.inner.clients.iter() { let mut rx = { @@ -639,7 +631,7 @@ impl ExclusivePool { } /// Read the client that should run the next command. 
- pub async fn acquire(&self) -> OwnedMutexGuard { + pub async fn acquire(&self) -> OwnedMutexGuard { let mut idx = utils::incr_atomic(&self.inner.counter) % self.inner.clients.len(); for _ in 0 .. self.inner.clients.len() { @@ -653,8 +645,8 @@ impl ExclusivePool { self.inner.clients[idx].clone().lock_owned().await } - /// Update the internal [PerformanceConfig](crate::types::PerformanceConfig) on each client in place with new - /// values. + /// Update the internal [PerformanceConfig](crate::types::config::PerformanceConfig) on each client in place with + /// new values. pub async fn update_perf_config(&self, config: PerformanceConfig) { for client in self.inner.clients.iter() { client.lock().await.update_perf_config(config.clone()); @@ -678,7 +670,7 @@ impl ExclusivePool { /// /// This function will also close all error, pubsub message, and reconnection event streams on all clients in the /// pool. - pub async fn quit(&self) -> RedisResult<()> { + pub async fn quit(&self) -> FredResult<()> { let mut fts = Vec::with_capacity(self.inner.clients.len()); for locked_client in self.inner.clients.iter() { let client = locked_client.clone(); diff --git a/src/clients/pubsub.rs b/src/clients/pubsub.rs index bb109309..42cae732 100644 --- a/src/clients/pubsub.rs +++ b/src/clients/pubsub.rs @@ -1,11 +1,15 @@ use crate::{ commands, - error::RedisError, + error::Error, interfaces::*, - modules::inner::RedisClientInner, - prelude::RedisClient, + modules::inner::ClientInner, + prelude::Client, runtime::{spawn, JoinHandle, RefCount, RwLock}, - types::{ConnectionConfig, MultipleStrings, PerformanceConfig, ReconnectPolicy, RedisConfig, RedisKey}, + types::{ + config::{Config, ConnectionConfig, PerformanceConfig, ReconnectPolicy}, + Key, + MultipleStrings, + }, util::group_by_hash_slot, }; use bytes_utils::Str; @@ -25,7 +29,7 @@ type ChannelSet = RefCount>>; /// use fred::clients::SubscriberClient; /// use fred::prelude::*; /// -/// async fn example() -> Result<(), RedisError> { +/// 
async fn example() -> Result<(), Error> { /// let subscriber = Builder::default_centralized().build_subscriber_client()?; /// subscriber.init().await?; /// @@ -58,7 +62,7 @@ pub struct SubscriberClient { channels: ChannelSet, patterns: ChannelSet, shard_channels: ChannelSet, - inner: RefCount, + inner: RefCount, } impl fmt::Debug for SubscriberClient { @@ -74,7 +78,7 @@ impl fmt::Debug for SubscriberClient { impl ClientLike for SubscriberClient { #[doc(hidden)] - fn inner(&self) -> &RefCount { + fn inner(&self) -> &RefCount { &self.inner } } @@ -156,7 +160,7 @@ impl RediSearchInterface for SubscriberClient {} #[cfg_attr(docsrs, doc(cfg(feature = "i-pubsub")))] #[rm_send_if(feature = "glommio")] impl PubsubInterface for SubscriberClient { - fn subscribe(&self, channels: S) -> impl Future> + Send + fn subscribe(&self, channels: S) -> impl Future> + Send where S: Into + Send, { @@ -178,7 +182,7 @@ impl PubsubInterface for SubscriberClient { } } - fn unsubscribe(&self, channels: S) -> impl Future> + Send + fn unsubscribe(&self, channels: S) -> impl Future> + Send where S: Into + Send, { @@ -203,7 +207,7 @@ impl PubsubInterface for SubscriberClient { } } - fn psubscribe(&self, patterns: S) -> impl Future> + Send + fn psubscribe(&self, patterns: S) -> impl Future> + Send where S: Into + Send, { @@ -224,7 +228,7 @@ impl PubsubInterface for SubscriberClient { } } - fn punsubscribe(&self, patterns: S) -> impl Future> + Send + fn punsubscribe(&self, patterns: S) -> impl Future> + Send where S: Into + Send, { @@ -249,7 +253,7 @@ impl PubsubInterface for SubscriberClient { } } - fn ssubscribe(&self, channels: C) -> impl Future> + Send + fn ssubscribe(&self, channels: C) -> impl Future> + Send where C: Into + Send, { @@ -270,7 +274,7 @@ impl PubsubInterface for SubscriberClient { } } - fn sunsubscribe(&self, channels: C) -> impl Future> + Send + fn sunsubscribe(&self, channels: C) -> impl Future> + Send where C: Into + Send, { @@ -301,7 +305,7 @@ impl SubscriberClient { /// /// 
See the [builder](crate::types::Builder) interface for more information. pub fn new( - config: RedisConfig, + config: Config, perf: Option, connection: Option, policy: Option, @@ -310,7 +314,7 @@ impl SubscriberClient { channels: RefCount::new(RwLock::new(BTreeSet::new())), patterns: RefCount::new(RwLock::new(BTreeSet::new())), shard_channels: RefCount::new(RwLock::new(BTreeSet::new())), - inner: RedisClientInner::new(config, perf.unwrap_or_default(), connection.unwrap_or_default(), policy), + inner: ClientInner::new(config, perf.unwrap_or_default(), connection.unwrap_or_default(), policy), } } @@ -319,7 +323,7 @@ impl SubscriberClient { /// The returned client will not be connected to the server, and it will use new connections after connecting. /// However, it will manage the same channel subscriptions as the original client. pub fn clone_new(&self) -> Self { - let inner = RedisClientInner::new( + let inner = ClientInner::new( self.inner.config.as_ref().clone(), self.inner.performance_config(), self.inner.connection.as_ref().clone(), @@ -371,10 +375,10 @@ impl SubscriberClient { /// Re-subscribe to any tracked channels and patterns. /// /// This can be used to sync the client's subscriptions with the server after calling `QUIT`, then `connect`, etc. 
- pub async fn resubscribe_all(&self) -> Result<(), RedisError> { - let channels: Vec = self.tracked_channels().into_iter().map(|s| s.into()).collect(); - let patterns: Vec = self.tracked_patterns().into_iter().map(|s| s.into()).collect(); - let shard_channels: Vec = self.tracked_shard_channels().into_iter().map(|s| s.into()).collect(); + pub async fn resubscribe_all(&self) -> Result<(), Error> { + let channels: Vec = self.tracked_channels().into_iter().map(|s| s.into()).collect(); + let patterns: Vec = self.tracked_patterns().into_iter().map(|s| s.into()).collect(); + let shard_channels: Vec = self.tracked_shard_channels().into_iter().map(|s| s.into()).collect(); self.subscribe(channels).await?; self.psubscribe(patterns).await?; @@ -389,16 +393,16 @@ impl SubscriberClient { } /// Unsubscribe from all tracked channels and patterns, and remove them from the client cache. - pub async fn unsubscribe_all(&self) -> Result<(), RedisError> { - let channels: Vec = mem::take(&mut *self.channels.write()) + pub async fn unsubscribe_all(&self) -> Result<(), Error> { + let channels: Vec = mem::take(&mut *self.channels.write()) .into_iter() .map(|s| s.into()) .collect(); - let patterns: Vec = mem::take(&mut *self.patterns.write()) + let patterns: Vec = mem::take(&mut *self.patterns.write()) .into_iter() .map(|s| s.into()) .collect(); - let shard_channels: Vec = mem::take(&mut *self.shard_channels.write()) + let shard_channels: Vec = mem::take(&mut *self.shard_channels.write()) .into_iter() .map(|s| s.into()) .collect(); @@ -419,7 +423,7 @@ impl SubscriberClient { /// Create a new `RedisClient`, reusing the existing connection(s). /// /// Note: most non-pubsub commands are only supported when using RESP3. 
- pub fn to_client(&self) -> RedisClient { - RedisClient::from(&self.inner) + pub fn to_client(&self) -> Client { + Client::from(&self.inner) } } diff --git a/src/clients/replica.rs b/src/clients/replica.rs index 5a07590b..5a4f5d94 100644 --- a/src/clients/replica.rs +++ b/src/clients/replica.rs @@ -1,11 +1,11 @@ use crate::{ - clients::{Pipeline, RedisClient}, - error::RedisError, + clients::{Client, Pipeline}, + error::Error, interfaces::{self, *}, - modules::inner::RedisClientInner, - protocol::command::{RedisCommand, RouterCommand}, + modules::inner::ClientInner, + protocol::command::{Command, RouterCommand}, runtime::{oneshot_channel, RefCount}, - types::Server, + types::config::Server, }; use std::{collections::HashMap, fmt, fmt::Formatter}; @@ -19,114 +19,125 @@ use std::{collections::HashMap, fmt, fmt::Formatter}; /// [Redis replication is asynchronous](https://redis.io/docs/management/replication/). #[derive(Clone)] #[cfg_attr(docsrs, doc(cfg(feature = "replicas")))] -pub struct Replicas { - inner: RefCount, +pub struct Replicas { + pub(crate) client: C, } -impl fmt::Debug for Replicas { +impl fmt::Debug for Replicas { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("Replicas").field("id", &self.inner.id).finish() + f.debug_struct("Replicas").field("id", &self.client.inner().id).finish() } } #[doc(hidden)] -impl From<&RefCount> for Replicas { - fn from(inner: &RefCount) -> Self { - Replicas { inner: inner.clone() } +impl From<&RefCount> for Replicas { + fn from(inner: &RefCount) -> Self { + Replicas { + client: Client::from(inner), + } } } -impl ClientLike for Replicas { +impl ClientLike for Replicas { #[doc(hidden)] - fn inner(&self) -> &RefCount { - &self.inner + fn inner(&self) -> &RefCount { + self.client.inner() } #[doc(hidden)] - fn change_command(&self, command: &mut RedisCommand) { + fn change_command(&self, command: &mut Command) { command.use_replica = true; + self.client.change_command(command); + } + + #[doc(hidden)] + fn 
send_command(&self, command: T) -> Result<(), Error> + where + T: Into, + { + self.client.send_command(command) } } #[cfg(feature = "i-redis-json")] #[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] -impl RedisJsonInterface for Replicas {} +impl RedisJsonInterface for Replicas {} #[cfg(feature = "i-time-series")] #[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] -impl TimeSeriesInterface for Replicas {} +impl TimeSeriesInterface for Replicas {} #[cfg(feature = "i-cluster")] #[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] -impl ClusterInterface for Replicas {} +impl ClusterInterface for Replicas {} #[cfg(feature = "i-config")] #[cfg_attr(docsrs, doc(cfg(feature = "i-config")))] -impl ConfigInterface for Replicas {} +impl ConfigInterface for Replicas {} #[cfg(feature = "i-geo")] #[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] -impl GeoInterface for Replicas {} +impl GeoInterface for Replicas {} #[cfg(feature = "i-hashes")] #[cfg_attr(docsrs, doc(cfg(feature = "i-hashes")))] -impl HashesInterface for Replicas {} +impl HashesInterface for Replicas {} #[cfg(feature = "i-hyperloglog")] #[cfg_attr(docsrs, doc(cfg(feature = "i-hyperloglog")))] -impl HyperloglogInterface for Replicas {} +impl HyperloglogInterface for Replicas {} #[cfg(feature = "i-keys")] #[cfg_attr(docsrs, doc(cfg(feature = "i-keys")))] -impl KeysInterface for Replicas {} +impl KeysInterface for Replicas {} #[cfg(feature = "i-scripts")] #[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] -impl LuaInterface for Replicas {} +impl LuaInterface for Replicas {} #[cfg(feature = "i-lists")] #[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] -impl ListInterface for Replicas {} +impl ListInterface for Replicas {} #[cfg(feature = "i-memory")] #[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] -impl MemoryInterface for Replicas {} +impl MemoryInterface for Replicas {} #[cfg(feature = "i-server")] #[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] -impl ServerInterface for Replicas {} +impl 
ServerInterface for Replicas {} #[cfg(feature = "i-slowlog")] #[cfg_attr(docsrs, doc(cfg(feature = "i-slowlog")))] -impl SlowlogInterface for Replicas {} +impl SlowlogInterface for Replicas {} #[cfg(feature = "i-sets")] #[cfg_attr(docsrs, doc(cfg(feature = "i-sets")))] -impl SetsInterface for Replicas {} +impl SetsInterface for Replicas {} #[cfg(feature = "i-sorted-sets")] #[cfg_attr(docsrs, doc(cfg(feature = "i-sorted-sets")))] -impl SortedSetsInterface for Replicas {} +impl SortedSetsInterface for Replicas {} #[cfg(feature = "i-streams")] #[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] -impl StreamsInterface for Replicas {} +impl StreamsInterface for Replicas {} #[cfg(feature = "i-scripts")] #[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] -impl FunctionInterface for Replicas {} +impl FunctionInterface for Replicas {} #[cfg(feature = "i-redisearch")] #[cfg_attr(docsrs, doc(cfg(feature = "i-redisearch")))] -impl RediSearchInterface for Replicas {} +impl RediSearchInterface for Replicas {} -impl Replicas { +impl Replicas { /// Read a mapping of replica server IDs to primary server IDs. pub fn nodes(&self) -> HashMap { - self.inner.server_state.read().replicas.clone() + self.client.inner().server_state.read().replicas.clone() } /// Send a series of commands in a [pipeline](https://redis.io/docs/manual/pipelining/). - pub fn pipeline(&self) -> Pipeline { + pub fn pipeline(&self) -> Pipeline> { Pipeline::from(self.clone()) } - /// Read the underlying [RedisClient](crate::clients::RedisClient) that interacts with primary nodes. - pub fn client(&self) -> RedisClient { - RedisClient::from(&self.inner) + /// Read the underlying [RedisClient](crate::clients::Client) that interacts with primary nodes. + pub fn client(&self) -> Client { + Client::from(self.client.inner()) } /// Sync the cached replica routing table with the server(s). /// /// If `reset: true` the client will forcefully disconnect from replicas even if the connections could otherwise be /// reused. 
- pub async fn sync(&self, reset: bool) -> Result<(), RedisError> { + pub async fn sync(&self, reset: bool) -> Result<(), Error> { let (tx, rx) = oneshot_channel(); let cmd = RouterCommand::SyncReplicas { tx, reset }; - interfaces::send_to_router(&self.inner, cmd)?; + interfaces::send_to_router(self.client.inner(), cmd)?; rx.await? } } diff --git a/src/clients/sentinel.rs b/src/clients/sentinel.rs index a11f15a7..4a2f4637 100644 --- a/src/clients/sentinel.rs +++ b/src/clients/sentinel.rs @@ -1,28 +1,28 @@ use crate::{ interfaces::*, - modules::inner::RedisClientInner, + modules::inner::ClientInner, runtime::RefCount, - types::{ConnectionConfig, PerformanceConfig, ReconnectPolicy, SentinelConfig}, + types::config::{ConnectionConfig, PerformanceConfig, ReconnectPolicy, SentinelConfig}, }; use std::fmt; /// A struct for interacting directly with Sentinel nodes. /// /// This struct **will not** communicate with Redis servers behind the sentinel interface, but rather with the -/// sentinel nodes themselves. Callers should use the [RedisClient](crate::clients::RedisClient) interface with a -/// [ServerConfig::Sentinel](crate::types::ServerConfig::Sentinel) for interacting with Redis services behind a -/// sentinel layer. +/// sentinel nodes themselves. Callers should use the [RedisClient](crate::clients::Client) interface with a +/// [ServerConfig::Sentinel](crate::types::config::ServerConfig::Sentinel) for interacting with Redis services behind +/// a sentinel layer. /// /// See the [sentinel API docs](https://redis.io/topics/sentinel#sentinel-api) for more information. 
#[derive(Clone)] #[cfg_attr(docsrs, doc(cfg(feature = "sentinel-client")))] pub struct SentinelClient { - inner: RefCount, + inner: RefCount, } impl ClientLike for SentinelClient { #[doc(hidden)] - fn inner(&self) -> &RefCount { + fn inner(&self) -> &RefCount { &self.inner } } @@ -37,8 +37,8 @@ impl fmt::Debug for SentinelClient { } #[doc(hidden)] -impl<'a> From<&'a RefCount> for SentinelClient { - fn from(inner: &'a RefCount) -> Self { +impl<'a> From<&'a RefCount> for SentinelClient { + fn from(inner: &'a RefCount) -> Self { SentinelClient { inner: inner.clone() } } } @@ -71,7 +71,7 @@ impl SentinelClient { policy: Option, ) -> SentinelClient { SentinelClient { - inner: RedisClientInner::new( + inner: ClientInner::new( config.into(), perf.unwrap_or_default(), connection.unwrap_or_default(), diff --git a/src/clients/transaction.rs b/src/clients/transaction.rs index 2566613b..23199395 100644 --- a/src/clients/transaction.rs +++ b/src/clients/transaction.rs @@ -1,34 +1,37 @@ use crate::{ - error::{RedisError, RedisErrorKind}, + error::{Error, ErrorKind}, interfaces, interfaces::*, - modules::inner::RedisClientInner, - prelude::RedisValue, + modules::inner::ClientInner, + prelude::Value, protocol::{ - command::{RedisCommand, RedisCommandKind, RouterCommand}, + command::{Command, CommandKind, RouterCommand}, hashers::ClusterHash, responders::ResponseKind, utils as protocol_utils, }, - runtime::{oneshot_channel, AtomicBool, Mutex, RefCount}, - types::{FromRedis, MultipleKeys, Options, RedisKey, Server}, + runtime::{oneshot_channel, Mutex, RefCount}, + types::{ + config::{Options, Server}, + FromValue, + Key, + }, utils, }; use std::{collections::VecDeque, fmt}; struct State { id: u64, - commands: Mutex>, - watched: Mutex>, + commands: Mutex>, + watched: Mutex>, hash_slot: Mutex>, - pipelined: AtomicBool, } /// A cheaply cloneable transaction block. 
#[derive(Clone)] #[cfg_attr(docsrs, doc(cfg(feature = "transactions")))] pub struct Transaction { - inner: RefCount, + inner: RefCount, state: RefCount, } @@ -39,7 +42,6 @@ impl fmt::Debug for Transaction { .field("id", &self.state.id) .field("length", &self.state.commands.lock().len()) .field("hash_slot", &self.state.hash_slot.lock()) - .field("pipelined", &utils::read_bool_atomic(&self.state.pipelined)) .finish() } } @@ -54,16 +56,17 @@ impl Eq for Transaction {} impl ClientLike for Transaction { #[doc(hidden)] - fn inner(&self) -> &RefCount { + fn inner(&self) -> &RefCount { &self.inner } #[doc(hidden)] - fn send_command(&self, command: C) -> Result<(), RedisError> + fn send_command(&self, command: C) -> Result<(), Error> where - C: Into, + C: Into, { - let mut command: RedisCommand = command.into(); + let mut command: Command = command.into(); + self.disallow_all_cluster_commands(&command)?; // check cluster slot mappings as commands are added self.update_hash_slot(&command)?; @@ -141,21 +144,20 @@ impl RediSearchInterface for Transaction {} impl Transaction { /// Create a new transaction. - pub(crate) fn from_inner(inner: &RefCount) -> Self { + pub(crate) fn from_inner(inner: &RefCount) -> Self { Transaction { inner: inner.clone(), state: RefCount::new(State { commands: Mutex::new(VecDeque::new()), watched: Mutex::new(VecDeque::new()), hash_slot: Mutex::new(None), - pipelined: AtomicBool::new(false), id: utils::random_u64(u64::MAX), }), } } /// Check and update the hash slot for the transaction. 
- pub(crate) fn update_hash_slot(&self, command: &RedisCommand) -> Result<(), RedisError> { + pub(crate) fn update_hash_slot(&self, command: &Command) -> Result<(), Error> { if !self.inner.config.server.is_clustered() { return Ok(()); } @@ -172,8 +174,8 @@ impl Transaction { })?; if old_server != server { - return Err(RedisError::new( - RedisErrorKind::Cluster, + return Err(Error::new( + ErrorKind::Cluster, "All transaction commands must use the same cluster node.", )); } @@ -185,10 +187,10 @@ impl Transaction { Ok(()) } - pub(crate) fn disallow_all_cluster_commands(&self, command: &RedisCommand) -> Result<(), RedisError> { + pub(crate) fn disallow_all_cluster_commands(&self, command: &Command) -> Result<(), Error> { if command.is_all_cluster_nodes() { - Err(RedisError::new( - RedisErrorKind::Cluster, + Err(Error::new( + ErrorKind::Cluster, "Cannot use concurrent cluster commands inside a transaction.", )) } else { @@ -213,22 +215,6 @@ impl Transaction { self.state.commands.lock().len() } - /// Whether to pipeline commands in the transaction. - /// - /// Note: pipelined transactions should only be used with Redis version >=2.6.5. - pub fn pipeline(&self, val: bool) { - utils::set_bool_atomic(&self.state.pipelined, val); - } - - /// Read the number of keys to `WATCH` before the starting the transaction. - #[deprecated( - since = "9.2.0", - note = "Please use `WATCH` with clients from an `ExclusivePool` instead." - )] - pub fn watched_len(&self) -> usize { - self.state.watched.lock().len() - } - /// Executes all previously queued commands in a transaction. 
/// /// If `abort_on_error` is `true` the client will automatically send `DISCARD` if an error is received from @@ -240,7 +226,7 @@ impl Transaction { /// ```rust no_run /// # use fred::prelude::*; /// - /// async fn example(client: &RedisClient) -> Result<(), RedisError> { + /// async fn example(client: &Client) -> Result<(), Error> { /// let _ = client.mset(vec![("foo", 1), ("bar", 2)]).await?; /// /// let trx = client.multi(); @@ -252,9 +238,9 @@ impl Transaction { /// Ok(()) /// } /// ``` - pub async fn exec(&self, abort_on_error: bool) -> Result + pub async fn exec(&self, abort_on_error: bool) -> Result where - R: FromRedis, + R: FromValue, { let commands = { self @@ -265,34 +251,14 @@ impl Transaction { .map(|cmd| cmd.duplicate(ResponseKind::Skip)) .collect() }; - let pipelined = utils::read_bool_atomic(&self.state.pipelined); let hash_slot = utils::read_mutex(&self.state.hash_slot); - exec( - &self.inner, - commands, - hash_slot, - abort_on_error, - pipelined, - self.state.id, - ) - .await? - .convert() - } - - /// Send the `WATCH` command with the provided keys before starting the transaction. - #[deprecated( - since = "9.2.0", - note = "Please use `WATCH` with clients from an `ExclusivePool` instead." - )] - pub fn watch_before(&self, keys: K) - where - K: Into, - { - self.state.watched.lock().extend(keys.into().inner()); + exec(&self.inner, commands, hash_slot, abort_on_error, self.state.id) + .await? + .convert() } - /// Read the hash slot against which this transaction will run, if known. + /// Read the hash slot against which this transaction will run, if known. 
pub fn hash_slot(&self) -> Option { utils::read_mutex(&self.state.hash_slot) } @@ -310,30 +276,28 @@ impl Transaction { } async fn exec( - inner: &RefCount, - commands: VecDeque, + inner: &RefCount, + commands: VecDeque, hash_slot: Option, abort_on_error: bool, - pipelined: bool, id: u64, -) -> Result { +) -> Result { if commands.is_empty() { - return Ok(RedisValue::Null); + return Ok(Value::Null); } let (tx, rx) = oneshot_channel(); let trx_options = Options::from_command(&commands[0]); - let mut multi = RedisCommand::new(RedisCommandKind::Multi, vec![]); + let mut multi = Command::new(CommandKind::Multi, vec![]); trx_options.apply(&mut multi); - let commands: Vec = [multi] + let commands: Vec = [multi] .into_iter() .chain(commands.into_iter()) .map(|mut command| { command.inherit_options(inner); command.response = ResponseKind::Skip; - command.can_pipeline = false; - command.skip_backpressure = true; + command.can_pipeline = true; command.transaction_id = Some(id); command.use_replica = false; if let Some(hash_slot) = hash_slot.as_ref() { @@ -353,7 +317,6 @@ async fn exec( id, tx, commands, - pipelined, abort_on_error, }; let timeout_dur = trx_options.timeout.unwrap_or_else(|| inner.default_command_timeout()); diff --git a/src/commands/impls/acl.rs b/src/commands/impls/acl.rs index f15a2e8d..dd505ef9 100644 --- a/src/commands/impls/acl.rs +++ b/src/commands/impls/acl.rs @@ -1,6 +1,6 @@ use super::*; use crate::{ - protocol::{command::RedisCommandKind, utils as protocol_utils}, + protocol::{command::CommandKind, utils as protocol_utils}, types::*, utils, }; @@ -12,7 +12,7 @@ values_cmd!(acl_list, AclList); values_cmd!(acl_users, AclUsers); value_cmd!(acl_whoami, AclWhoAmI); -pub async fn acl_setuser(client: &C, username: Str, rules: MultipleValues) -> Result<(), RedisError> { +pub async fn acl_setuser(client: &C, username: Str, rules: MultipleValues) -> Result<(), Error> { let frame = utils::request_response(client, move || { let rules = 
rules.into_multiple_values(); let mut args = Vec::with_capacity(rules.len() + 1); @@ -21,7 +21,7 @@ pub async fn acl_setuser(client: &C, username: Str, rules: Multip for rule in rules.into_iter() { args.push(rule); } - Ok((RedisCommandKind::AclSetUser, args)) + Ok((CommandKind::AclSetUser, args)) }) .await?; @@ -29,51 +29,51 @@ pub async fn acl_setuser(client: &C, username: Str, rules: Multip protocol_utils::expect_ok(&response) } -pub async fn acl_getuser(client: &C, username: RedisValue) -> Result { - one_arg_value_cmd(client, RedisCommandKind::AclGetUser, username).await +pub async fn acl_getuser(client: &C, username: Value) -> Result { + one_arg_value_cmd(client, CommandKind::AclGetUser, username).await } -pub async fn acl_deluser(client: &C, usernames: MultipleKeys) -> Result { - let args: Vec = usernames.inner().into_iter().map(|k| k.into()).collect(); - let frame = utils::request_response(client, move || Ok((RedisCommandKind::AclDelUser, args))).await?; +pub async fn acl_deluser(client: &C, usernames: MultipleKeys) -> Result { + let args: Vec = usernames.inner().into_iter().map(|k| k.into()).collect(); + let frame = utils::request_response(client, move || Ok((CommandKind::AclDelUser, args))).await?; protocol_utils::frame_to_results(frame) } -pub async fn acl_cat(client: &C, category: Option) -> Result { - let args: Vec = if let Some(cat) = category { +pub async fn acl_cat(client: &C, category: Option) -> Result { + let args: Vec = if let Some(cat) = category { vec![cat.into()] } else { Vec::new() }; - let frame = utils::request_response(client, move || Ok((RedisCommandKind::AclCat, args))).await?; + let frame = utils::request_response(client, move || Ok((CommandKind::AclCat, args))).await?; protocol_utils::frame_to_results(frame) } -pub async fn acl_genpass(client: &C, bits: Option) -> Result { - let args: Vec = if let Some(bits) = bits { +pub async fn acl_genpass(client: &C, bits: Option) -> Result { + let args: Vec = if let Some(bits) = bits { 
vec![bits.into()] } else { Vec::new() }; - let frame = utils::request_response(client, move || Ok((RedisCommandKind::AclGenPass, args))).await?; + let frame = utils::request_response(client, move || Ok((CommandKind::AclGenPass, args))).await?; protocol_utils::frame_to_results(frame) } -pub async fn acl_log_reset(client: &C) -> Result<(), RedisError> { - let frame = utils::request_response(client, || Ok((RedisCommandKind::AclLog, vec![static_val!(RESET)]))).await?; +pub async fn acl_log_reset(client: &C) -> Result<(), Error> { + let frame = utils::request_response(client, || Ok((CommandKind::AclLog, vec![static_val!(RESET)]))).await?; let response = protocol_utils::frame_to_results(frame)?; protocol_utils::expect_ok(&response) } -pub async fn acl_log_count(client: &C, count: Option) -> Result { - let args: Vec = if let Some(count) = count { +pub async fn acl_log_count(client: &C, count: Option) -> Result { + let args: Vec = if let Some(count) = count { vec![count.into()] } else { Vec::new() }; - let frame = utils::request_response(client, move || Ok((RedisCommandKind::AclLog, args))).await?; + let frame = utils::request_response(client, move || Ok((CommandKind::AclLog, args))).await?; protocol_utils::frame_to_results(frame) } diff --git a/src/commands/impls/client.rs b/src/commands/impls/client.rs index 2ad01fcf..3538ac6a 100644 --- a/src/commands/impls/client.rs +++ b/src/commands/impls/client.rs @@ -1,10 +1,10 @@ use super::*; use crate::{ protocol::{ - command::{RedisCommand, RedisCommandKind}, + command::{Command, CommandKind}, utils as protocol_utils, }, - types::*, + types::{client::*, ClientUnblockFlag, Key}, utils, }; use bytes_utils::Str; @@ -12,10 +12,7 @@ use bytes_utils::Str; value_cmd!(client_id, ClientID); value_cmd!(client_info, ClientInfo); -pub async fn client_kill( - client: &C, - filters: Vec, -) -> Result { +pub async fn client_kill(client: &C, filters: Vec) -> Result { let frame = utils::request_response(client, move || { let mut args = 
Vec::with_capacity(filters.len() * 2); @@ -25,7 +22,7 @@ pub async fn client_kill( args.push(value.into()); } - Ok((RedisCommandKind::ClientKill, args)) + Ok((CommandKind::ClientKill, args)) }) .await?; @@ -36,8 +33,8 @@ pub async fn client_list( client: &C, r#type: Option, ids: Option>, -) -> Result { - let ids: Option> = ids.map(|ids| ids.into_iter().map(|id| id.into()).collect()); +) -> Result { + let ids: Option> = ids.map(|ids| ids.into_iter().map(|id| id.into()).collect()); let frame = utils::request_response(client, move || { let max_args = 2 + ids.as_ref().map(|i| i.len()).unwrap_or(0); let mut args = Vec::with_capacity(max_args); @@ -56,7 +53,7 @@ pub async fn client_list( } } - Ok((RedisCommandKind::ClientList, args)) + Ok((CommandKind::ClientList, args)) }) .await?; @@ -67,7 +64,7 @@ pub async fn client_pause( client: &C, timeout: i64, mode: Option, -) -> Result<(), RedisError> { +) -> Result<(), Error> { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2); args.push(timeout.into()); @@ -76,7 +73,7 @@ pub async fn client_pause( args.push(mode.to_str().into()); } - Ok((RedisCommandKind::ClientPause, args)) + Ok((CommandKind::ClientPause, args)) }) .await?; @@ -86,18 +83,17 @@ pub async fn client_pause( value_cmd!(client_getname, ClientGetName); -pub async fn client_setname(client: &C, name: Str) -> Result<(), RedisError> { - let frame = - utils::request_response(client, move || Ok((RedisCommandKind::ClientSetname, vec![name.into()]))).await?; +pub async fn client_setname(client: &C, name: Str) -> Result<(), Error> { + let frame = utils::request_response(client, move || Ok((CommandKind::ClientSetname, vec![name.into()]))).await?; let response = protocol_utils::frame_to_results(frame)?; protocol_utils::expect_ok(&response) } ok_cmd!(client_unpause, ClientUnpause); -pub async fn client_reply(client: &C, flag: ClientReplyFlag) -> Result<(), RedisError> { +pub async fn client_reply(client: &C, flag: ClientReplyFlag) -> 
Result<(), Error> { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::ClientReply, vec![flag.to_str().into()])) + Ok((CommandKind::ClientReply, vec![flag.to_str().into()])) }) .await?; @@ -107,9 +103,9 @@ pub async fn client_reply(client: &C, flag: ClientReplyFlag) -> R pub async fn client_unblock( client: &C, - id: RedisValue, + id: Value, flag: Option, -) -> Result { +) -> Result { let inner = client.inner(); let mut args = Vec::with_capacity(2); @@ -117,20 +113,20 @@ pub async fn client_unblock( if let Some(flag) = flag { args.push(flag.to_str().into()); } - let command = RedisCommand::new(RedisCommandKind::ClientUnblock, args); + let command = Command::new(CommandKind::ClientUnblock, args); let frame = utils::backchannel_request_response(inner, command, false).await?; protocol_utils::frame_to_results(frame) } -pub async fn unblock_self(client: &C, flag: Option) -> Result<(), RedisError> { +pub async fn unblock_self(client: &C, flag: Option) -> Result<(), Error> { let inner = client.inner(); let flag = flag.unwrap_or(ClientUnblockFlag::Error); let result = utils::interrupt_blocked_connection(inner, flag).await; - inner.backchannel.write().await.set_unblocked(); + inner.backchannel.set_unblocked(); result } -pub async fn echo(client: &C, message: RedisValue) -> Result { - one_arg_value_cmd(client, RedisCommandKind::Echo, message).await +pub async fn echo(client: &C, message: Value) -> Result { + one_arg_value_cmd(client, CommandKind::Echo, message).await } diff --git a/src/commands/impls/cluster.rs b/src/commands/impls/cluster.rs index e0dc093c..8ee74bb3 100644 --- a/src/commands/impls/cluster.rs +++ b/src/commands/impls/cluster.rs @@ -2,11 +2,11 @@ use super::*; use crate::{ interfaces, protocol::{ - command::{RedisCommandKind, RouterCommand}, + command::{CommandKind, RouterCommand}, utils as protocol_utils, }, runtime::oneshot_channel, - types::*, + types::{cluster::*, Key, MultipleHashSlots}, utils, }; use bytes_utils::Str; @@ 
-19,12 +19,12 @@ value_cmd!(cluster_nodes, ClusterNodes); ok_cmd!(cluster_saveconfig, ClusterSaveConfig); values_cmd!(cluster_slots, ClusterSlots); -pub async fn cluster_info(client: &C) -> Result { - let frame = utils::request_response(client, || Ok((RedisCommandKind::ClusterInfo, vec![]))).await?; +pub async fn cluster_info(client: &C) -> Result { + let frame = utils::request_response(client, || Ok((CommandKind::ClusterInfo, vec![]))).await?; protocol_utils::frame_to_results(frame) } -pub async fn cluster_add_slots(client: &C, slots: MultipleHashSlots) -> Result<(), RedisError> { +pub async fn cluster_add_slots(client: &C, slots: MultipleHashSlots) -> Result<(), Error> { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(slots.len()); @@ -32,7 +32,7 @@ pub async fn cluster_add_slots(client: &C, slots: MultipleHashSlo args.push(slot.into()); } - Ok((RedisCommandKind::ClusterAddSlots, args)) + Ok((CommandKind::ClusterAddSlots, args)) }) .await?; @@ -40,27 +40,24 @@ pub async fn cluster_add_slots(client: &C, slots: MultipleHashSlo protocol_utils::expect_ok(&response) } -pub async fn cluster_count_failure_reports( - client: &C, - node_id: Str, -) -> Result { +pub async fn cluster_count_failure_reports(client: &C, node_id: Str) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::ClusterCountFailureReports, vec![node_id.into()])) + Ok((CommandKind::ClusterCountFailureReports, vec![node_id.into()])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn cluster_count_keys_in_slot(client: &C, slot: u16) -> Result { +pub async fn cluster_count_keys_in_slot(client: &C, slot: u16) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::ClusterCountKeysInSlot, vec![slot.into()])) + Ok((CommandKind::ClusterCountKeysInSlot, vec![slot.into()])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn cluster_del_slots(client: &C, slots: 
MultipleHashSlots) -> Result<(), RedisError> { +pub async fn cluster_del_slots(client: &C, slots: MultipleHashSlots) -> Result<(), Error> { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(slots.len()); @@ -68,7 +65,7 @@ pub async fn cluster_del_slots(client: &C, slots: MultipleHashSlo args.push(slot.into()); } - Ok((RedisCommandKind::ClusterDelSlots, args)) + Ok((CommandKind::ClusterDelSlots, args)) }) .await?; @@ -76,10 +73,7 @@ pub async fn cluster_del_slots(client: &C, slots: MultipleHashSlo protocol_utils::expect_ok(&response) } -pub async fn cluster_failover( - client: &C, - flag: Option, -) -> Result<(), RedisError> { +pub async fn cluster_failover(client: &C, flag: Option) -> Result<(), Error> { let frame = utils::request_response(client, move || { let args = if let Some(flag) = flag { vec![flag.to_str().into()] @@ -87,7 +81,7 @@ pub async fn cluster_failover( Vec::new() }; - Ok((RedisCommandKind::ClusterFailOver, args)) + Ok((CommandKind::ClusterFailOver, args)) }) .await?; @@ -95,50 +89,46 @@ pub async fn cluster_failover( protocol_utils::expect_ok(&response) } -pub async fn cluster_forget(client: &C, node_id: Str) -> Result<(), RedisError> { - one_arg_ok_cmd(client, RedisCommandKind::ClusterForget, node_id.into()).await +pub async fn cluster_forget(client: &C, node_id: Str) -> Result<(), Error> { + one_arg_ok_cmd(client, CommandKind::ClusterForget, node_id.into()).await } -pub async fn cluster_get_keys_in_slot( - client: &C, - slot: u16, - count: u64, -) -> Result { - let count: RedisValue = count.try_into()?; +pub async fn cluster_get_keys_in_slot(client: &C, slot: u16, count: u64) -> Result { + let count: Value = count.try_into()?; let frame = utils::request_response(client, move || { let args = vec![slot.into(), count]; - Ok((RedisCommandKind::ClusterGetKeysInSlot, args)) + Ok((CommandKind::ClusterGetKeysInSlot, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn cluster_keyslot(client: &C, 
key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::ClusterKeySlot, key.into()).await +pub async fn cluster_keyslot(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::ClusterKeySlot, key.into()).await } -pub async fn cluster_meet(client: &C, ip: Str, port: u16) -> Result<(), RedisError> { - args_ok_cmd(client, RedisCommandKind::ClusterMeet, vec![ip.into(), port.into()]).await +pub async fn cluster_meet(client: &C, ip: Str, port: u16) -> Result<(), Error> { + args_ok_cmd(client, CommandKind::ClusterMeet, vec![ip.into(), port.into()]).await } -pub async fn cluster_replicate(client: &C, node_id: Str) -> Result<(), RedisError> { - one_arg_ok_cmd(client, RedisCommandKind::ClusterReplicate, node_id.into()).await +pub async fn cluster_replicate(client: &C, node_id: Str) -> Result<(), Error> { + one_arg_ok_cmd(client, CommandKind::ClusterReplicate, node_id.into()).await } -pub async fn cluster_replicas(client: &C, node_id: Str) -> Result { - one_arg_value_cmd(client, RedisCommandKind::ClusterReplicas, node_id.into()).await +pub async fn cluster_replicas(client: &C, node_id: Str) -> Result { + one_arg_value_cmd(client, CommandKind::ClusterReplicas, node_id.into()).await } -pub async fn cluster_reset(client: &C, mode: Option) -> Result<(), RedisError> { +pub async fn cluster_reset(client: &C, mode: Option) -> Result<(), Error> { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1); if let Some(flag) = mode { args.push(flag.to_str().into()); } - Ok((RedisCommandKind::ClusterReset, args)) + Ok((CommandKind::ClusterReset, args)) }) .await?; @@ -146,16 +136,12 @@ pub async fn cluster_reset(client: &C, mode: Option(client: &C, epoch: u64) -> Result<(), RedisError> { - let epoch: RedisValue = epoch.try_into()?; - one_arg_ok_cmd(client, RedisCommandKind::ClusterSetConfigEpoch, epoch).await +pub async fn cluster_set_config_epoch(client: &C, epoch: u64) -> Result<(), Error> { + let epoch: Value = 
epoch.try_into()?; + one_arg_ok_cmd(client, CommandKind::ClusterSetConfigEpoch, epoch).await } -pub async fn cluster_setslot( - client: &C, - slot: u16, - state: ClusterSetSlotState, -) -> Result<(), RedisError> { +pub async fn cluster_setslot(client: &C, slot: u16, state: ClusterSetSlotState) -> Result<(), Error> { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(3); args.push(slot.into()); @@ -166,7 +152,7 @@ pub async fn cluster_setslot( args.push(arg.into()); } - Ok((RedisCommandKind::ClusterSetSlot, args)) + Ok((CommandKind::ClusterSetSlot, args)) }) .await?; @@ -174,7 +160,7 @@ pub async fn cluster_setslot( protocol_utils::expect_ok(&response) } -pub async fn sync_cluster(client: &C) -> Result<(), RedisError> { +pub async fn sync_cluster(client: &C) -> Result<(), Error> { let (tx, rx) = oneshot_channel(); let command = RouterCommand::SyncCluster { tx }; interfaces::send_to_router(client.inner(), command)?; diff --git a/src/commands/impls/config.rs b/src/commands/impls/config.rs index 78f0e28e..0f3c4a28 100644 --- a/src/commands/impls/config.rs +++ b/src/commands/impls/config.rs @@ -1,14 +1,14 @@ use super::*; -use crate::{protocol::command::RedisCommandKind, types::*}; +use crate::{protocol::command::CommandKind, types::*}; use bytes_utils::Str; ok_cmd!(config_resetstat, ConfigResetStat); ok_cmd!(config_rewrite, ConfigRewrite); -pub async fn config_get(client: &C, parameter: Str) -> Result { - one_arg_values_cmd(client, RedisCommandKind::ConfigGet, parameter.into()).await +pub async fn config_get(client: &C, parameter: Str) -> Result { + one_arg_values_cmd(client, CommandKind::ConfigGet, parameter.into()).await } -pub async fn config_set(client: &C, parameter: Str, value: RedisValue) -> Result<(), RedisError> { - args_ok_cmd(client, RedisCommandKind::ConfigSet, vec![parameter.into(), value]).await +pub async fn config_set(client: &C, parameter: Str, value: Value) -> Result<(), Error> { + args_ok_cmd(client, 
CommandKind::ConfigSet, vec![parameter.into(), value]).await } diff --git a/src/commands/impls/geo.rs b/src/commands/impls/geo.rs index 4a01479f..fcfccf86 100644 --- a/src/commands/impls/geo.rs +++ b/src/commands/impls/geo.rs @@ -1,8 +1,8 @@ use super::*; use crate::{ - error::RedisError, - protocol::{command::RedisCommandKind, utils as protocol_utils}, - types::*, + error::Error, + protocol::{command::CommandKind, utils as protocol_utils}, + types::{geo::*, Any, Key, MultipleValues, SetOptions, SortOrder, Value}, utils, }; use std::convert::TryInto; @@ -18,11 +18,11 @@ static BY_BOX: &str = "BYBOX"; pub async fn geoadd( client: &C, - key: RedisKey, + key: Key, options: Option, changed: bool, values: MultipleGeoValues, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(3 + (values.len() * 3)); args.push(key.into()); @@ -40,18 +40,14 @@ pub async fn geoadd( args.push(value.member) } - Ok((RedisCommandKind::GeoAdd, args)) + Ok((CommandKind::GeoAdd, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn geohash( - client: &C, - key: RedisKey, - members: MultipleValues, -) -> Result { +pub async fn geohash(client: &C, key: Key, members: MultipleValues) -> Result { let frame = utils::request_response(client, move || { let members = members.into_multiple_values(); let mut args = Vec::with_capacity(1 + members.len()); @@ -61,18 +57,14 @@ pub async fn geohash( args.push(member); } - Ok((RedisCommandKind::GeoHash, args)) + Ok((CommandKind::GeoHash, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn geopos( - client: &C, - key: RedisKey, - members: MultipleValues, -) -> Result { +pub async fn geopos(client: &C, key: Key, members: MultipleValues) -> Result { let frame = utils::request_response(client, move || { let members = members.into_multiple_values(); let mut args = Vec::with_capacity(1 + members.len()); @@ -82,7 +74,7 @@ pub async fn geopos( args.push(member); 
} - Ok((RedisCommandKind::GeoPos, args)) + Ok((CommandKind::GeoPos, args)) }) .await?; @@ -91,11 +83,11 @@ pub async fn geopos( pub async fn geodist( client: &C, - key: RedisKey, - src: RedisValue, - dest: RedisValue, + key: Key, + src: Value, + dest: Value, unit: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(4); args.push(key.into()); @@ -106,7 +98,7 @@ pub async fn geodist( args.push(unit.to_str().into()); } - Ok((RedisCommandKind::GeoDist, args)) + Ok((CommandKind::GeoDist, args)) }) .await?; @@ -115,7 +107,7 @@ pub async fn geodist( pub async fn georadius( client: &C, - key: RedisKey, + key: Key, position: GeoPosition, radius: f64, unit: GeoUnit, @@ -124,9 +116,9 @@ pub async fn georadius( withhash: bool, count: Option<(u64, Any)>, ord: Option, - store: Option, - storedist: Option, -) -> Result { + store: Option, + storedist: Option, +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(16); args.push(key.into()); @@ -163,7 +155,7 @@ pub async fn georadius( args.push(store_dist.into()); } - Ok((RedisCommandKind::GeoRadius, args)) + Ok((CommandKind::GeoRadius, args)) }) .await?; @@ -172,8 +164,8 @@ pub async fn georadius( pub async fn georadiusbymember( client: &C, - key: RedisKey, - member: RedisValue, + key: Key, + member: Value, radius: f64, unit: GeoUnit, withcoord: bool, @@ -181,9 +173,9 @@ pub async fn georadiusbymember( withhash: bool, count: Option<(u64, Any)>, ord: Option, - store: Option, - storedist: Option, -) -> Result { + store: Option, + storedist: Option, +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(15); args.push(key.into()); @@ -219,7 +211,7 @@ pub async fn georadiusbymember( args.push(store_dist.into()); } - Ok((RedisCommandKind::GeoRadiusByMember, args)) + Ok((CommandKind::GeoRadiusByMember, args)) }) .await?; @@ -228,8 +220,8 @@ pub async fn 
georadiusbymember( pub async fn geosearch( client: &C, - key: RedisKey, - from_member: Option, + key: Key, + from_member: Option, from_lonlat: Option, by_radius: Option<(f64, GeoUnit)>, by_box: Option<(f64, f64, GeoUnit)>, @@ -238,7 +230,7 @@ pub async fn geosearch( withcoord: bool, withdist: bool, withhash: bool, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(15); args.push(key.into()); @@ -284,7 +276,7 @@ pub async fn geosearch( args.push(static_val!(WITH_HASH)); } - Ok((RedisCommandKind::GeoSearch, args)) + Ok((CommandKind::GeoSearch, args)) }) .await?; @@ -293,16 +285,16 @@ pub async fn geosearch( pub async fn geosearchstore( client: &C, - dest: RedisKey, - source: RedisKey, - from_member: Option, + dest: Key, + source: Key, + from_member: Option, from_lonlat: Option, by_radius: Option<(f64, GeoUnit)>, by_box: Option<(f64, f64, GeoUnit)>, ord: Option, count: Option<(u64, Any)>, storedist: bool, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(14); args.push(dest.into()); @@ -342,7 +334,7 @@ pub async fn geosearchstore( args.push(static_val!(STORE_DIST)); } - Ok((RedisCommandKind::GeoSearchStore, args)) + Ok((CommandKind::GeoSearchStore, args)) }) .await?; diff --git a/src/commands/impls/hashes.rs b/src/commands/impls/hashes.rs index e04d5e7c..b1185cad 100644 --- a/src/commands/impls/hashes.rs +++ b/src/commands/impls/hashes.rs @@ -1,6 +1,6 @@ use super::*; use crate::{ - protocol::{command::RedisCommandKind, utils as protocol_utils}, + protocol::{command::CommandKind, utils as protocol_utils}, types::*, utils, }; @@ -18,7 +18,7 @@ fn frame_is_queued(frame: &Resp3Frame) -> bool { } } -pub async fn hdel(client: &C, key: RedisKey, fields: MultipleKeys) -> Result { +pub async fn hdel(client: &C, key: Key, fields: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1 
+ fields.len()); args.push(key.into()); @@ -27,63 +27,53 @@ pub async fn hdel(client: &C, key: RedisKey, fields: MultipleKeys args.push(field.into()); } - Ok((RedisCommandKind::HDel, args)) + Ok((CommandKind::HDel, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn hexists(client: &C, key: RedisKey, field: RedisKey) -> Result { - let args: Vec = vec![key.into(), field.into()]; - args_value_cmd(client, RedisCommandKind::HExists, args).await +pub async fn hexists(client: &C, key: Key, field: Key) -> Result { + let args: Vec = vec![key.into(), field.into()]; + args_value_cmd(client, CommandKind::HExists, args).await } -pub async fn hget(client: &C, key: RedisKey, field: RedisKey) -> Result { - let args: Vec = vec![key.into(), field.into()]; - args_value_cmd(client, RedisCommandKind::HGet, args).await +pub async fn hget(client: &C, key: Key, field: Key) -> Result { + let args: Vec = vec![key.into(), field.into()]; + args_value_cmd(client, CommandKind::HGet, args).await } -pub async fn hgetall(client: &C, key: RedisKey) -> Result { - let frame = utils::request_response(client, move || Ok((RedisCommandKind::HGetAll, vec![key.into()]))).await?; +pub async fn hgetall(client: &C, key: Key) -> Result { + let frame = utils::request_response(client, move || Ok((CommandKind::HGetAll, vec![key.into()]))).await?; if frame.as_str().map(|s| s == QUEUED).unwrap_or(false) { protocol_utils::frame_to_results(frame) } else { - Ok(RedisValue::Map(protocol_utils::frame_to_map(frame)?)) + Ok(Value::Map(protocol_utils::frame_to_map(frame)?)) } } -pub async fn hincrby( - client: &C, - key: RedisKey, - field: RedisKey, - increment: i64, -) -> Result { - let args: Vec = vec![key.into(), field.into(), increment.into()]; - args_value_cmd(client, RedisCommandKind::HIncrBy, args).await +pub async fn hincrby(client: &C, key: Key, field: Key, increment: i64) -> Result { + let args: Vec = vec![key.into(), field.into(), increment.into()]; + args_value_cmd(client, 
CommandKind::HIncrBy, args).await } -pub async fn hincrbyfloat( - client: &C, - key: RedisKey, - field: RedisKey, - increment: f64, -) -> Result { - let args: Vec = vec![key.into(), field.into(), increment.try_into()?]; - args_value_cmd(client, RedisCommandKind::HIncrByFloat, args).await +pub async fn hincrbyfloat(client: &C, key: Key, field: Key, increment: f64) -> Result { + let args: Vec = vec![key.into(), field.into(), increment.try_into()?]; + args_value_cmd(client, CommandKind::HIncrByFloat, args).await } -pub async fn hkeys(client: &C, key: RedisKey) -> Result { - let frame = utils::request_response(client, move || Ok((RedisCommandKind::HKeys, vec![key.into()]))).await?; +pub async fn hkeys(client: &C, key: Key) -> Result { + let frame = utils::request_response(client, move || Ok((CommandKind::HKeys, vec![key.into()]))).await?; protocol_utils::frame_to_results(frame) } -pub async fn hlen(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::HLen, key.into()).await +pub async fn hlen(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::HLen, key.into()).await } -pub async fn hmget(client: &C, key: RedisKey, fields: MultipleKeys) -> Result { +pub async fn hmget(client: &C, key: Key, fields: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1 + fields.len()); args.push(key.into()); @@ -91,14 +81,14 @@ pub async fn hmget(client: &C, key: RedisKey, fields: MultipleKey for field in fields.inner().into_iter() { args.push(field.into()); } - Ok((RedisCommandKind::HMGet, args)) + Ok((CommandKind::HMGet, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn hmset(client: &C, key: RedisKey, values: RedisMap) -> Result { +pub async fn hmset(client: &C, key: Key, values: Map) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1 + (values.len() * 2)); args.push(key.into()); @@ -107,14 
+97,14 @@ pub async fn hmset(client: &C, key: RedisKey, values: RedisMap) - args.push(key.into()); args.push(value); } - Ok((RedisCommandKind::HMSet, args)) + Ok((CommandKind::HMSet, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn hset(client: &C, key: RedisKey, values: RedisMap) -> Result { +pub async fn hset(client: &C, key: Key, values: Map) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1 + (values.len() * 2)); args.push(key.into()); @@ -124,32 +114,23 @@ pub async fn hset(client: &C, key: RedisKey, values: RedisMap) -> args.push(value); } - Ok((RedisCommandKind::HSet, args)) + Ok((CommandKind::HSet, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn hsetnx( - client: &C, - key: RedisKey, - field: RedisKey, - value: RedisValue, -) -> Result { +pub async fn hsetnx(client: &C, key: Key, field: Key, value: Value) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::HSetNx, vec![key.into(), field.into(), value])) + Ok((CommandKind::HSetNx, vec![key.into(), field.into(), value])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn hrandfield( - client: &C, - key: RedisKey, - count: Option<(i64, bool)>, -) -> Result { +pub async fn hrandfield(client: &C, key: Key, count: Option<(i64, bool)>) -> Result { let (has_count, has_values) = count.as_ref().map(|(_c, b)| (true, *b)).unwrap_or((false, false)); let frame = utils::request_response(client, move || { @@ -163,14 +144,14 @@ pub async fn hrandfield( } } - Ok((RedisCommandKind::HRandField, args)) + Ok((CommandKind::HRandField, args)) }) .await?; if has_count { if has_values && frame.as_str().map(|s| s != QUEUED).unwrap_or(true) { let frame = protocol_utils::flatten_frame(frame); - protocol_utils::frame_to_map(frame).map(RedisValue::Map) + protocol_utils::frame_to_map(frame).map(Value::Map) } else { protocol_utils::frame_to_results(frame) } @@ -179,20 +160,21 @@ pub 
async fn hrandfield( } } -pub async fn hstrlen(client: &C, key: RedisKey, field: RedisKey) -> Result { +pub async fn hstrlen(client: &C, key: Key, field: Key) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::HStrLen, vec![key.into(), field.into()])) + Ok((CommandKind::HStrLen, vec![key.into(), field.into()])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn hvals(client: &C, key: RedisKey) -> Result { - one_arg_values_cmd(client, RedisCommandKind::HVals, key.into()).await +pub async fn hvals(client: &C, key: Key) -> Result { + one_arg_values_cmd(client, CommandKind::HVals, key.into()).await } -pub async fn httl(client: &C, key: RedisKey, fields: MultipleKeys) -> Result { +#[cfg(feature = "i-hexpire")] +pub async fn httl(client: &C, key: Key, fields: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(fields.len() + 3); args.extend([key.into(), static_val!(FIELDS), fields.len().try_into()?]); @@ -200,20 +182,21 @@ pub async fn httl(client: &C, key: RedisKey, fields: MultipleKeys args.push(field.into()); } - Ok((RedisCommandKind::HTtl, args)) + Ok((CommandKind::HTtl, args)) }) .await?; protocol_utils::frame_to_results(frame) } +#[cfg(feature = "i-hexpire")] pub async fn hexpire( client: &C, - key: RedisKey, + key: Key, seconds: i64, options: Option, fields: MultipleKeys, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(fields.len() + 4); args.extend([key.into(), seconds.into()]); @@ -225,20 +208,21 @@ pub async fn hexpire( args.push(field.into()); } - Ok((RedisCommandKind::HExpire, args)) + Ok((CommandKind::HExpire, args)) }) .await?; protocol_utils::frame_to_results(frame) } +#[cfg(feature = "i-hexpire")] pub async fn hexpire_at( client: &C, - key: RedisKey, + key: Key, time: i64, options: Option, fields: MultipleKeys, -) -> Result { +) -> Result { let frame = 
utils::request_response(client, move || { let mut args = Vec::with_capacity(fields.len() + 4); args.extend([key.into(), time.into()]); @@ -250,18 +234,15 @@ pub async fn hexpire_at( args.push(field.into()); } - Ok((RedisCommandKind::HExpireAt, args)) + Ok((CommandKind::HExpireAt, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn hexpire_time( - client: &C, - key: RedisKey, - fields: MultipleKeys, -) -> Result { +#[cfg(feature = "i-hexpire")] +pub async fn hexpire_time(client: &C, key: Key, fields: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(fields.len() + 3); args.extend([key.into(), static_val!(FIELDS), fields.len().try_into()?]); @@ -269,14 +250,15 @@ pub async fn hexpire_time( args.push(field.into()); } - Ok((RedisCommandKind::HExpireTime, args)) + Ok((CommandKind::HExpireTime, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn hpttl(client: &C, key: RedisKey, fields: MultipleKeys) -> Result { +#[cfg(feature = "i-hexpire")] +pub async fn hpttl(client: &C, key: Key, fields: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(fields.len() + 3); args.extend([key.into(), static_val!(FIELDS), fields.len().try_into()?]); @@ -284,20 +266,21 @@ pub async fn hpttl(client: &C, key: RedisKey, fields: MultipleKey args.push(field.into()); } - Ok((RedisCommandKind::HPTtl, args)) + Ok((CommandKind::HPTtl, args)) }) .await?; protocol_utils::frame_to_results(frame) } +#[cfg(feature = "i-hexpire")] pub async fn hpexpire( client: &C, - key: RedisKey, + key: Key, milliseconds: i64, options: Option, fields: MultipleKeys, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(fields.len() + 4); args.extend([key.into(), milliseconds.into()]); @@ -309,20 +292,21 @@ pub async fn hpexpire( args.push(field.into()); } - 
Ok((RedisCommandKind::HPExpire, args)) + Ok((CommandKind::HPExpire, args)) }) .await?; protocol_utils::frame_to_results(frame) } +#[cfg(feature = "i-hexpire")] pub async fn hpexpire_at( client: &C, - key: RedisKey, + key: Key, time: i64, options: Option, fields: MultipleKeys, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(fields.len() + 4); args.extend([key.into(), time.into()]); @@ -334,18 +318,15 @@ pub async fn hpexpire_at( args.push(field.into()); } - Ok((RedisCommandKind::HPExpireAt, args)) + Ok((CommandKind::HPExpireAt, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn hpexpire_time( - client: &C, - key: RedisKey, - fields: MultipleKeys, -) -> Result { +#[cfg(feature = "i-hexpire")] +pub async fn hpexpire_time(client: &C, key: Key, fields: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(fields.len() + 3); args.extend([key.into(), static_val!(FIELDS), fields.len().try_into()?]); @@ -353,18 +334,15 @@ pub async fn hpexpire_time( args.push(field.into()); } - Ok((RedisCommandKind::HPExpireTime, args)) + Ok((CommandKind::HPExpireTime, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn hpersist( - client: &C, - key: RedisKey, - fields: MultipleKeys, -) -> Result { +#[cfg(feature = "i-hexpire")] +pub async fn hpersist(client: &C, key: Key, fields: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(fields.len() + 3); args.extend([key.into(), static_val!(FIELDS), fields.len().try_into()?]); @@ -372,7 +350,7 @@ pub async fn hpersist( args.push(field.into()); } - Ok((RedisCommandKind::HPersist, args)) + Ok((CommandKind::HPersist, args)) }) .await?; diff --git a/src/commands/impls/hyperloglog.rs b/src/commands/impls/hyperloglog.rs index 71793815..9d52d6bc 100644 --- a/src/commands/impls/hyperloglog.rs +++ 
b/src/commands/impls/hyperloglog.rs @@ -1,15 +1,11 @@ use super::*; use crate::{ - protocol::{command::RedisCommandKind, utils as protocol_utils}, + protocol::{command::CommandKind, utils as protocol_utils}, types::*, utils, }; -pub async fn pfadd( - client: &C, - key: RedisKey, - elements: MultipleValues, -) -> Result { +pub async fn pfadd(client: &C, key: Key, elements: MultipleValues) -> Result { let frame = utils::request_response(client, move || { let elements = elements.into_multiple_values(); let mut args = Vec::with_capacity(1 + elements.len()); @@ -18,23 +14,19 @@ pub async fn pfadd( for element in elements.into_iter() { args.push(element); } - Ok((RedisCommandKind::Pfadd, args)) + Ok((CommandKind::Pfadd, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn pfcount(client: &C, keys: MultipleKeys) -> Result { - let args: Vec = keys.inner().into_iter().map(|k| k.into()).collect(); - args_value_cmd(client, RedisCommandKind::Pfcount, args).await +pub async fn pfcount(client: &C, keys: MultipleKeys) -> Result { + let args: Vec = keys.inner().into_iter().map(|k| k.into()).collect(); + args_value_cmd(client, CommandKind::Pfcount, args).await } -pub async fn pfmerge( - client: &C, - dest: RedisKey, - sources: MultipleKeys, -) -> Result { +pub async fn pfmerge(client: &C, dest: Key, sources: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1 + sources.len()); args.push(dest.into()); @@ -42,7 +34,7 @@ pub async fn pfmerge( for source in sources.inner().into_iter() { args.push(source.into()); } - Ok((RedisCommandKind::Pfmerge, args)) + Ok((CommandKind::Pfmerge, args)) }) .await?; diff --git a/src/commands/impls/keys.rs b/src/commands/impls/keys.rs index 31cb0f55..6bbc994e 100644 --- a/src/commands/impls/keys.rs +++ b/src/commands/impls/keys.rs @@ -1,18 +1,15 @@ use super::*; use crate::{ error::*, - protocol::{command::RedisCommandKind, utils as protocol_utils}, + 
protocol::{command::CommandKind, utils as protocol_utils}, types::*, utils, }; use std::convert::TryInto; -fn check_empty_keys(keys: &MultipleKeys) -> Result<(), RedisError> { +fn check_empty_keys(keys: &MultipleKeys) -> Result<(), Error> { if keys.len() == 0 { - Err(RedisError::new( - RedisErrorKind::InvalidArgument, - "At least one key is required.", - )) + Err(Error::new(ErrorKind::InvalidArgument, "At least one key is required.")) } else { Ok(()) } @@ -20,18 +17,18 @@ fn check_empty_keys(keys: &MultipleKeys) -> Result<(), RedisError> { value_cmd!(randomkey, Randomkey); -pub async fn get(client: &C, key: RedisKey) -> Result { - one_arg_values_cmd(client, RedisCommandKind::Get, key.into()).await +pub async fn get(client: &C, key: Key) -> Result { + one_arg_values_cmd(client, CommandKind::Get, key.into()).await } pub async fn set( client: &C, - key: RedisKey, - value: RedisValue, + key: Key, + value: Value, expire: Option, options: Option, get: bool, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(6); args.push(key.into()); @@ -51,117 +48,132 @@ pub async fn set( args.push(static_val!(GET)); } - Ok((RedisCommandKind::Set, args)) + Ok((CommandKind::Set, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn setnx(client: &C, key: RedisKey, value: RedisValue) -> Result { - args_value_cmd(client, RedisCommandKind::Setnx, vec![key.into(), value]).await +pub async fn setnx(client: &C, key: Key, value: Value) -> Result { + args_value_cmd(client, CommandKind::Setnx, vec![key.into(), value]).await } -pub async fn del(client: &C, keys: MultipleKeys) -> Result { +pub async fn del(client: &C, keys: MultipleKeys) -> Result { check_empty_keys(&keys)?; - let args: Vec = keys.inner().drain(..).map(|k| k.into()).collect(); - let frame = utils::request_response(client, move || Ok((RedisCommandKind::Del, args))).await?; + let args: Vec = keys.inner().drain(..).map(|k| k.into()).collect(); + 
let frame = utils::request_response(client, move || Ok((CommandKind::Del, args))).await?; protocol_utils::frame_to_results(frame) } -pub async fn unlink(client: &C, keys: MultipleKeys) -> Result { +pub async fn unlink(client: &C, keys: MultipleKeys) -> Result { check_empty_keys(&keys)?; - let args: Vec = keys.inner().drain(..).map(|k| k.into()).collect(); - let frame = utils::request_response(client, move || Ok((RedisCommandKind::Unlink, args))).await?; + let args: Vec = keys.inner().drain(..).map(|k| k.into()).collect(); + let frame = utils::request_response(client, move || Ok((CommandKind::Unlink, args))).await?; protocol_utils::frame_to_results(frame) } -pub async fn append(client: &C, key: RedisKey, value: RedisValue) -> Result { - args_value_cmd(client, RedisCommandKind::Append, vec![key.into(), value]).await +pub async fn append(client: &C, key: Key, value: Value) -> Result { + args_value_cmd(client, CommandKind::Append, vec![key.into(), value]).await } -pub async fn incr(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::Incr, key.into()).await +pub async fn incr(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::Incr, key.into()).await } -pub async fn decr(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::Decr, key.into()).await +pub async fn decr(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::Decr, key.into()).await } -pub async fn incr_by(client: &C, key: RedisKey, val: i64) -> Result { - let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::IncrBy, vec![key.into(), val.into()])) - }) - .await?; +pub async fn incr_by(client: &C, key: Key, val: i64) -> Result { + let frame = + utils::request_response(client, move || Ok((CommandKind::IncrBy, vec![key.into(), val.into()]))).await?; protocol_utils::frame_to_results(frame) } -pub async fn decr_by(client: &C, key: RedisKey, val: i64) -> Result { - let frame = 
utils::request_response(client, move || { - Ok((RedisCommandKind::DecrBy, vec![key.into(), val.into()])) - }) - .await?; +pub async fn decr_by(client: &C, key: Key, val: i64) -> Result { + let frame = + utils::request_response(client, move || Ok((CommandKind::DecrBy, vec![key.into(), val.into()]))).await?; protocol_utils::frame_to_results(frame) } -pub async fn incr_by_float(client: &C, key: RedisKey, val: f64) -> Result { - let val: RedisValue = val.try_into()?; - let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::IncrByFloat, vec![key.into(), val])) - }) - .await?; +pub async fn incr_by_float(client: &C, key: Key, val: f64) -> Result { + let val: Value = val.try_into()?; + let frame = utils::request_response(client, move || Ok((CommandKind::IncrByFloat, vec![key.into(), val]))).await?; protocol_utils::frame_to_results(frame) } -pub async fn ttl(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::Ttl, key.into()).await +pub async fn ttl(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::Ttl, key.into()).await } -pub async fn pttl(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::Pttl, key.into()).await +pub async fn pttl(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::Pttl, key.into()).await } -pub async fn persist(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::Persist, key.into()).await +pub async fn persist(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::Persist, key.into()).await } -pub async fn expire(client: &C, key: RedisKey, seconds: i64) -> Result { +pub async fn expire( + client: &C, + key: Key, + seconds: i64, + options: Option, +) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Expire, vec![key.into(), seconds.into()])) + let args = if let Some(options) = options { + vec![key.into(), seconds.into(), 
options.to_str().into()] + } else { + vec![key.into(), seconds.into()] + }; + + Ok((CommandKind::Expire, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn expire_at(client: &C, key: RedisKey, timestamp: i64) -> Result { +pub async fn expire_at( + client: &C, + key: Key, + timestamp: i64, + options: Option, +) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::ExpireAt, vec![key.into(), timestamp.into()])) + let args = if let Some(options) = options { + vec![key.into(), timestamp.into(), options.to_str().into()] + } else { + vec![key.into(), timestamp.into()] + }; + + Ok((CommandKind::ExpireAt, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn expire_time(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::ExpireTime, key.into()).await +pub async fn expire_time(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::ExpireTime, key.into()).await } -pub async fn pexpire_time(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::PexpireTime, key.into()).await +pub async fn pexpire_time(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::PexpireTime, key.into()).await } pub async fn pexpire( client: &C, - key: RedisKey, + key: Key, milliseconds: i64, options: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let args = if let Some(options) = options { vec![key.into(), milliseconds.into(), options.to_str().into()] @@ -169,7 +181,7 @@ pub async fn pexpire( vec![key.into(), milliseconds.into()] }; - Ok((RedisCommandKind::Pexpire, args)) + Ok((CommandKind::Pexpire, args)) }) .await?; @@ -178,10 +190,10 @@ pub async fn pexpire( pub async fn pexpire_at( client: &C, - key: RedisKey, + key: Key, timestamp: i64, options: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let args = if let Some(options) = 
options { vec![key.into(), timestamp.into(), options.to_str().into()] @@ -189,14 +201,14 @@ pub async fn pexpire_at( vec![key.into(), timestamp.into()] }; - Ok((RedisCommandKind::Pexpireat, args)) + Ok((CommandKind::Pexpireat, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn exists(client: &C, keys: MultipleKeys) -> Result { +pub async fn exists(client: &C, keys: MultipleKeys) -> Result { check_empty_keys(&keys)?; let frame = utils::request_response(client, move || { @@ -206,27 +218,27 @@ pub async fn exists(client: &C, keys: MultipleKeys) -> Result(client: &C, key: RedisKey) -> Result { - one_arg_values_cmd(client, RedisCommandKind::Dump, key.into()).await +pub async fn dump(client: &C, key: Key) -> Result { + one_arg_values_cmd(client, CommandKind::Dump, key.into()).await } pub async fn restore( client: &C, - key: RedisKey, + key: Key, ttl: i64, - serialized: RedisValue, + serialized: Value, replace: bool, absttl: bool, idletime: Option, frequency: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(9); args.push(key.into()); @@ -248,21 +260,16 @@ pub async fn restore( args.push(frequency.into()); } - Ok((RedisCommandKind::Restore, args)) + Ok((CommandKind::Restore, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn getrange( - client: &C, - key: RedisKey, - start: usize, - end: usize, -) -> Result { +pub async fn getrange(client: &C, key: Key, start: usize, end: usize) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::GetRange, vec![ + Ok((CommandKind::GetRange, vec![ key.into(), start.try_into()?, end.try_into()?, @@ -273,57 +280,36 @@ pub async fn getrange( protocol_utils::frame_to_results(frame) } -pub async fn setrange( - client: &C, - key: RedisKey, - offset: u32, - value: RedisValue, -) -> Result { +pub async fn setrange(client: &C, key: Key, offset: u32, value: Value) -> Result { let frame = 
utils::request_response(client, move || { - Ok((RedisCommandKind::Setrange, vec![key.into(), offset.into(), value])) + Ok((CommandKind::Setrange, vec![key.into(), offset.into(), value])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn getset(client: &C, key: RedisKey, value: RedisValue) -> Result { - args_values_cmd(client, RedisCommandKind::GetSet, vec![key.into(), value]).await +pub async fn getset(client: &C, key: Key, value: Value) -> Result { + args_values_cmd(client, CommandKind::GetSet, vec![key.into(), value]).await } -pub async fn rename( - client: &C, - source: RedisKey, - destination: RedisKey, -) -> Result { - args_values_cmd(client, RedisCommandKind::Rename, vec![ - source.into(), - destination.into(), - ]) - .await +pub async fn rename(client: &C, source: Key, destination: Key) -> Result { + args_values_cmd(client, CommandKind::Rename, vec![source.into(), destination.into()]).await } -pub async fn renamenx( - client: &C, - source: RedisKey, - destination: RedisKey, -) -> Result { - args_values_cmd(client, RedisCommandKind::Renamenx, vec![ - source.into(), - destination.into(), - ]) - .await +pub async fn renamenx(client: &C, source: Key, destination: Key) -> Result { + args_values_cmd(client, CommandKind::Renamenx, vec![source.into(), destination.into()]).await } -pub async fn getdel(client: &C, key: RedisKey) -> Result { - one_arg_values_cmd(client, RedisCommandKind::GetDel, key.into()).await +pub async fn getdel(client: &C, key: Key) -> Result { + one_arg_values_cmd(client, CommandKind::GetDel, key.into()).await } -pub async fn strlen(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::Strlen, key.into()).await +pub async fn strlen(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::Strlen, key.into()).await } -pub async fn mget(client: &C, keys: MultipleKeys) -> Result { +pub async fn mget(client: &C, keys: MultipleKeys) -> Result { check_empty_keys(&keys)?; let frame = 
utils::request_response(client, move || { @@ -333,19 +319,16 @@ pub async fn mget(client: &C, keys: MultipleKeys) -> Result(client: &C, values: RedisMap) -> Result { +pub async fn mset(client: &C, values: Map) -> Result { if values.len() == 0 { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, - "Values cannot be empty.", - )); + return Err(Error::new(ErrorKind::InvalidArgument, "Values cannot be empty.")); } let frame = utils::request_response(client, move || { @@ -356,19 +339,16 @@ pub async fn mset(client: &C, values: RedisMap) -> Result(client: &C, values: RedisMap) -> Result { +pub async fn msetnx(client: &C, values: Map) -> Result { if values.len() == 0 { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, - "Values cannot be empty.", - )); + return Err(Error::new(ErrorKind::InvalidArgument, "Values cannot be empty.")); } let frame = utils::request_response(client, move || { @@ -379,7 +359,7 @@ pub async fn msetnx(client: &C, values: RedisMap) -> Result(client: &C, values: RedisMap) -> Result( client: &C, - source: RedisKey, - destination: RedisKey, + source: Key, + destination: Key, db: Option, replace: bool, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(5); args.push(source.into()); @@ -406,33 +386,33 @@ pub async fn copy( args.push(static_val!(REPLACE)); } - Ok((RedisCommandKind::Copy, args)) + Ok((CommandKind::Copy, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn watch(client: &C, keys: MultipleKeys) -> Result<(), RedisError> { +pub async fn watch(client: &C, keys: MultipleKeys) -> Result<(), Error> { let args = keys.inner().into_iter().map(|k| k.into()).collect(); - args_ok_cmd(client, RedisCommandKind::Watch, args).await + args_ok_cmd(client, CommandKind::Watch, args).await } ok_cmd!(unwatch, Unwatch); -pub async fn r#type(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::Type, 
key.into()).await +pub async fn r#type(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::Type, key.into()).await } pub async fn lcs( client: &C, - key1: RedisKey, - key2: RedisKey, + key1: Key, + key2: Key, len: bool, idx: bool, minmatchlen: Option, withmatchlen: bool, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(7); args.push(key1.into()); @@ -452,7 +432,7 @@ pub async fn lcs( args.push(static_val!(WITHMATCHLEN)); } - Ok((RedisCommandKind::Lcs, args)) + Ok((CommandKind::Lcs, args)) }) .await?; diff --git a/src/commands/impls/lists.rs b/src/commands/impls/lists.rs index b2ba58e2..5012db50 100644 --- a/src/commands/impls/lists.rs +++ b/src/commands/impls/lists.rs @@ -1,7 +1,7 @@ use super::*; use crate::{ - protocol::{command::RedisCommandKind, utils as protocol_utils}, - types::*, + protocol::{command::CommandKind, utils as protocol_utils}, + types::{lists::*, Key, Limit, MultipleKeys, MultipleStrings, MultipleValues, SortOrder, Value}, utils, }; use bytes_utils::Str; @@ -9,13 +9,13 @@ use std::convert::TryInto; pub async fn sort_ro( client: &C, - key: RedisKey, + key: Key, by: Option, limit: Option, get: MultipleStrings, order: Option, alpha: bool, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(8 + get.len() * 2); args.push(key.into()); @@ -40,7 +40,7 @@ pub async fn sort_ro( args.push(static_val!("ALPHA")); } - Ok((RedisCommandKind::SortRo, args)) + Ok((CommandKind::SortRo, args)) }) .await?; @@ -49,14 +49,14 @@ pub async fn sort_ro( pub async fn sort( client: &C, - key: RedisKey, + key: Key, by: Option, limit: Option, get: MultipleStrings, order: Option, alpha: bool, - store: Option, -) -> Result { + store: Option, +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(10 + get.len() * 2); args.push(key.into()); @@ -85,7 +85,7 @@ pub async fn sort( 
args.push(dest.into()); } - Ok((RedisCommandKind::Sort, args)) + Ok((CommandKind::Sort, args)) }) .await?; @@ -98,8 +98,8 @@ pub async fn blmpop( keys: MultipleKeys, direction: LMoveDirection, count: Option, -) -> Result { - let timeout: RedisValue = timeout.try_into()?; +) -> Result { + let timeout: Value = timeout.try_into()?; let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(keys.len() + 4); @@ -114,7 +114,7 @@ pub async fn blmpop( args.push(count.into()); } - Ok((RedisCommandKind::BlmPop, args)) + Ok((CommandKind::BlmPop, args)) }) .await?; @@ -122,8 +122,8 @@ pub async fn blmpop( protocol_utils::frame_to_results(frame) } -pub async fn blpop(client: &C, keys: MultipleKeys, timeout: f64) -> Result { - let timeout: RedisValue = timeout.try_into()?; +pub async fn blpop(client: &C, keys: MultipleKeys, timeout: f64) -> Result { + let timeout: Value = timeout.try_into()?; let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(keys.len() + 1); @@ -132,7 +132,7 @@ pub async fn blpop(client: &C, keys: MultipleKeys, timeout: f64) } args.push(timeout); - Ok((RedisCommandKind::BlPop, args)) + Ok((CommandKind::BlPop, args)) }) .await?; @@ -140,8 +140,8 @@ pub async fn blpop(client: &C, keys: MultipleKeys, timeout: f64) protocol_utils::frame_to_results(frame) } -pub async fn brpop(client: &C, keys: MultipleKeys, timeout: f64) -> Result { - let timeout: RedisValue = timeout.try_into()?; +pub async fn brpop(client: &C, keys: MultipleKeys, timeout: f64) -> Result { + let timeout: Value = timeout.try_into()?; let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(keys.len() + 1); @@ -150,7 +150,7 @@ pub async fn brpop(client: &C, keys: MultipleKeys, timeout: f64) } args.push(timeout); - Ok((RedisCommandKind::BrPop, args)) + Ok((CommandKind::BrPop, args)) }) .await?; @@ -160,14 +160,14 @@ pub async fn brpop(client: &C, keys: MultipleKeys, timeout: f64) pub async fn brpoplpush( 
client: &C, - source: RedisKey, - destination: RedisKey, + source: Key, + destination: Key, timeout: f64, -) -> Result { - let timeout: RedisValue = timeout.try_into()?; +) -> Result { + let timeout: Value = timeout.try_into()?; let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::BrPopLPush, vec![ + Ok((CommandKind::BrPopLPush, vec![ source.into(), destination.into(), timeout, @@ -181,13 +181,13 @@ pub async fn brpoplpush( pub async fn blmove( client: &C, - source: RedisKey, - destination: RedisKey, + source: Key, + destination: Key, source_direction: LMoveDirection, destination_direction: LMoveDirection, timeout: f64, -) -> Result { - let timeout: RedisValue = timeout.try_into()?; +) -> Result { + let timeout: Value = timeout.try_into()?; let frame = utils::request_response(client, move || { let args = vec![ @@ -198,7 +198,7 @@ pub async fn blmove( timeout, ]; - Ok((RedisCommandKind::BlMove, args)) + Ok((CommandKind::BlMove, args)) }) .await?; @@ -211,7 +211,7 @@ pub async fn lmpop( keys: MultipleKeys, direction: LMoveDirection, count: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(keys.len() + 3); args.push(keys.len().try_into()?); @@ -224,27 +224,27 @@ pub async fn lmpop( args.push(count.into()); } - Ok((RedisCommandKind::LMPop, args)) + Ok((CommandKind::LMPop, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn lindex(client: &C, key: RedisKey, index: i64) -> Result { - let args: Vec = vec![key.into(), index.into()]; - args_value_cmd(client, RedisCommandKind::LIndex, args).await +pub async fn lindex(client: &C, key: Key, index: i64) -> Result { + let args: Vec = vec![key.into(), index.into()]; + args_value_cmd(client, CommandKind::LIndex, args).await } pub async fn linsert( client: &C, - key: RedisKey, + key: Key, location: ListLocation, - pivot: RedisValue, - element: RedisValue, -) -> Result { + pivot: Value, + element: Value, +) -> 
Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::LInsert, vec![ + Ok((CommandKind::LInsert, vec![ key.into(), location.to_str().into(), pivot, @@ -256,11 +256,11 @@ pub async fn linsert( protocol_utils::frame_to_results(frame) } -pub async fn llen(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::LLen, key.into()).await +pub async fn llen(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::LLen, key.into()).await } -pub async fn lpop(client: &C, key: RedisKey, count: Option) -> Result { +pub async fn lpop(client: &C, key: Key, count: Option) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2); args.push(key.into()); @@ -269,7 +269,7 @@ pub async fn lpop(client: &C, key: RedisKey, count: Option args.push(count.try_into()?); } - Ok((RedisCommandKind::LPop, args)) + Ok((CommandKind::LPop, args)) }) .await?; @@ -278,12 +278,12 @@ pub async fn lpop(client: &C, key: RedisKey, count: Option pub async fn lpos( client: &C, - key: RedisKey, - element: RedisValue, + key: Key, + element: Value, rank: Option, count: Option, maxlen: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(8); args.push(key.into()); @@ -302,18 +302,14 @@ pub async fn lpos( args.push(maxlen.into()); } - Ok((RedisCommandKind::LPos, args)) + Ok((CommandKind::LPos, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn lpush( - client: &C, - key: RedisKey, - elements: MultipleValues, -) -> Result { +pub async fn lpush(client: &C, key: Key, elements: MultipleValues) -> Result { let frame = utils::request_response(client, move || { let elements = elements.into_multiple_values(); let mut args = Vec::with_capacity(1 + elements.len()); @@ -323,18 +319,14 @@ pub async fn lpush( args.push(element); } - Ok((RedisCommandKind::LPush, args)) + Ok((CommandKind::LPush, args)) }) 
.await?; protocol_utils::frame_to_results(frame) } -pub async fn lpushx( - client: &C, - key: RedisKey, - elements: MultipleValues, -) -> Result { +pub async fn lpushx(client: &C, key: Key, elements: MultipleValues) -> Result { let frame = utils::request_response(client, move || { let elements = elements.into_multiple_values(); let mut args = Vec::with_capacity(1 + elements.len()); @@ -344,54 +336,34 @@ pub async fn lpushx( args.push(element); } - Ok((RedisCommandKind::LPushX, args)) + Ok((CommandKind::LPushX, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn lrange( - client: &C, - key: RedisKey, - start: i64, - stop: i64, -) -> Result { +pub async fn lrange(client: &C, key: Key, start: i64, stop: i64) -> Result { let (key, start, stop) = (key.into(), start.into(), stop.into()); - args_values_cmd(client, RedisCommandKind::LRange, vec![key, start, stop]).await + args_values_cmd(client, CommandKind::LRange, vec![key, start, stop]).await } -pub async fn lrem( - client: &C, - key: RedisKey, - count: i64, - element: RedisValue, -) -> Result { +pub async fn lrem(client: &C, key: Key, count: i64, element: Value) -> Result { let (key, count) = (key.into(), count.into()); - args_value_cmd(client, RedisCommandKind::LRem, vec![key, count, element]).await + args_value_cmd(client, CommandKind::LRem, vec![key, count, element]).await } -pub async fn lset( - client: &C, - key: RedisKey, - index: i64, - element: RedisValue, -) -> Result { +pub async fn lset(client: &C, key: Key, index: i64, element: Value) -> Result { let args = vec![key.into(), index.into(), element]; - args_value_cmd(client, RedisCommandKind::LSet, args).await + args_value_cmd(client, CommandKind::LSet, args).await } -pub async fn ltrim( - client: &C, - key: RedisKey, - start: i64, - stop: i64, -) -> Result { +pub async fn ltrim(client: &C, key: Key, start: i64, stop: i64) -> Result { let args = vec![key.into(), start.into(), stop.into()]; - args_value_cmd(client, 
RedisCommandKind::LTrim, args).await + args_value_cmd(client, CommandKind::LTrim, args).await } -pub async fn rpop(client: &C, key: RedisKey, count: Option) -> Result { +pub async fn rpop(client: &C, key: Key, count: Option) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2); args.push(key.into()); @@ -400,29 +372,25 @@ pub async fn rpop(client: &C, key: RedisKey, count: Option args.push(count.try_into()?); } - Ok((RedisCommandKind::Rpop, args)) + Ok((CommandKind::Rpop, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn rpoplpush( - client: &C, - source: RedisKey, - dest: RedisKey, -) -> Result { +pub async fn rpoplpush(client: &C, source: Key, dest: Key) -> Result { let args = vec![source.into(), dest.into()]; - args_value_cmd(client, RedisCommandKind::Rpoplpush, args).await + args_value_cmd(client, CommandKind::Rpoplpush, args).await } pub async fn lmove( client: &C, - source: RedisKey, - dest: RedisKey, + source: Key, + dest: Key, source_direction: LMoveDirection, dest_direction: LMoveDirection, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let args = vec![ source.into(), @@ -431,18 +399,14 @@ pub async fn lmove( dest_direction.to_str().into(), ]; - Ok((RedisCommandKind::LMove, args)) + Ok((CommandKind::LMove, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn rpush( - client: &C, - key: RedisKey, - elements: MultipleValues, -) -> Result { +pub async fn rpush(client: &C, key: Key, elements: MultipleValues) -> Result { let frame = utils::request_response(client, move || { let elements = elements.into_multiple_values(); let mut args = Vec::with_capacity(1 + elements.len()); @@ -452,18 +416,14 @@ pub async fn rpush( args.push(element); } - Ok((RedisCommandKind::Rpush, args)) + Ok((CommandKind::Rpush, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn rpushx( - client: &C, - key: RedisKey, - elements: 
MultipleValues, -) -> Result { +pub async fn rpushx(client: &C, key: Key, elements: MultipleValues) -> Result { let frame = utils::request_response(client, move || { let elements = elements.into_multiple_values(); let mut args = Vec::with_capacity(1 + elements.len()); @@ -473,7 +433,7 @@ pub async fn rpushx( args.push(element); } - Ok((RedisCommandKind::Rpushx, args)) + Ok((CommandKind::Rpushx, args)) }) .await?; diff --git a/src/commands/impls/lua.rs b/src/commands/impls/lua.rs index 55815a42..589b5a4b 100644 --- a/src/commands/impls/lua.rs +++ b/src/commands/impls/lua.rs @@ -3,15 +3,18 @@ use super::*; use crate::util::sha1_hash; use crate::{ error::*, - modules::inner::RedisClientInner, + modules::inner::ClientInner, protocol::{ - command::{RedisCommand, RedisCommandKind}, + command::{Command, CommandKind}, hashers::ClusterHash, responders::ResponseKind, utils as protocol_utils, }, runtime::{oneshot_channel, RefCount}, - types::*, + types::{ + scripts::{FnPolicy, ScriptDebugFlag}, + *, + }, utils, }; use bytes::Bytes; @@ -21,7 +24,7 @@ use std::{convert::TryInto, str}; /// Check that all the keys in an EVAL* command belong to the same server, returning a key slot that maps to that /// server. 
-pub fn check_key_slot(inner: &RefCount, keys: &[RedisKey]) -> Result, RedisError> { +pub fn check_key_slot(inner: &RefCount, keys: &[Key]) -> Result, Error> { if inner.config.server.is_clustered() { inner.with_cluster_state(|state| { let (mut cmd_server, mut cmd_slot) = (None, None); @@ -31,8 +34,8 @@ pub fn check_key_slot(inner: &RefCount, keys: &[RedisKey]) -> if let Some(server) = state.get_server(key_slot) { if let Some(ref cmd_server) = cmd_server { if cmd_server != server { - return Err(RedisError::new( - RedisErrorKind::Cluster, + return Err(Error::new( + ErrorKind::Cluster, "All keys must belong to the same cluster node.", )); } @@ -41,8 +44,8 @@ pub fn check_key_slot(inner: &RefCount, keys: &[RedisKey]) -> cmd_slot = Some(key_slot); } } else { - return Err(RedisError::new( - RedisErrorKind::Cluster, + return Err(Error::new( + ErrorKind::Cluster, format!("Missing server for hash slot {}", key_slot), )); } @@ -55,20 +58,20 @@ pub fn check_key_slot(inner: &RefCount, keys: &[RedisKey]) -> } } -pub async fn script_load(client: &C, script: Str) -> Result { - one_arg_value_cmd(client, RedisCommandKind::ScriptLoad, script.into()).await +pub async fn script_load(client: &C, script: Str) -> Result { + one_arg_value_cmd(client, CommandKind::ScriptLoad, script.into()).await } #[cfg(feature = "sha-1")] -pub async fn script_load_cluster(client: &C, script: Str) -> Result { +pub async fn script_load_cluster(client: &C, script: Str) -> Result { if !client.inner().config.server.is_clustered() { return script_load(client, script).await; } let hash = sha1_hash(&script); let (tx, rx) = oneshot_channel(); - let response = ResponseKind::new_buffer(tx); - let mut command: RedisCommand = (RedisCommandKind::_ScriptLoadCluster, vec![script.into()], response).into(); + let response = ResponseKind::Respond(Some(tx)); + let mut command: Command = (CommandKind::_ScriptLoadCluster, vec![script.into()], response).into(); let timeout_dur = utils::prepare_command(client, &mut command); 
client.send_command(command)?; @@ -78,14 +81,14 @@ pub async fn script_load_cluster(client: &C, script: Str) -> Resu ok_cmd!(script_kill, ScriptKill); -pub async fn script_kill_cluster(client: &C) -> Result<(), RedisError> { +pub async fn script_kill_cluster(client: &C) -> Result<(), Error> { if !client.inner().config.server.is_clustered() { return script_kill(client).await; } let (tx, rx) = oneshot_channel(); - let response = ResponseKind::new_buffer(tx); - let mut command: RedisCommand = (RedisCommandKind::_ScriptKillCluster, vec![], response).into(); + let response = ResponseKind::Respond(Some(tx)); + let mut command: Command = (CommandKind::_ScriptKillCluster, vec![], response).into(); let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; @@ -93,10 +96,10 @@ pub async fn script_kill_cluster(client: &C) -> Result<(), RedisE Ok(()) } -pub async fn script_flush(client: &C, r#async: bool) -> Result<(), RedisError> { +pub async fn script_flush(client: &C, r#async: bool) -> Result<(), Error> { let frame = utils::request_response(client, move || { let arg = static_val!(if r#async { ASYNC } else { SYNC }); - Ok((RedisCommandKind::ScriptFlush, vec![arg])) + Ok((CommandKind::ScriptFlush, vec![arg])) }) .await?; @@ -104,15 +107,15 @@ pub async fn script_flush(client: &C, r#async: bool) -> Result<() protocol_utils::expect_ok(&response) } -pub async fn script_flush_cluster(client: &C, r#async: bool) -> Result<(), RedisError> { +pub async fn script_flush_cluster(client: &C, r#async: bool) -> Result<(), Error> { if !client.inner().config.server.is_clustered() { return script_flush(client, r#async).await; } let (tx, rx) = oneshot_channel(); let arg = static_val!(if r#async { ASYNC } else { SYNC }); - let response = ResponseKind::new_buffer(tx); - let mut command: RedisCommand = (RedisCommandKind::_ScriptFlushCluster, vec![arg], response).into(); + let response = ResponseKind::Respond(Some(tx)); + let mut command: Command = 
(CommandKind::_ScriptFlushCluster, vec![arg], response).into(); let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; @@ -121,19 +124,19 @@ pub async fn script_flush_cluster(client: &C, r#async: bool) -> R Ok(()) } -pub async fn script_exists(client: &C, hashes: MultipleStrings) -> Result { +pub async fn script_exists(client: &C, hashes: MultipleStrings) -> Result { let frame = utils::request_response(client, move || { let args = hashes.inner().into_iter().map(|s| s.into()).collect(); - Ok((RedisCommandKind::ScriptExists, args)) + Ok((CommandKind::ScriptExists, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn script_debug(client: &C, flag: ScriptDebugFlag) -> Result<(), RedisError> { +pub async fn script_debug(client: &C, flag: ScriptDebugFlag) -> Result<(), Error> { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::ScriptDebug, vec![flag.to_str().into()])) + Ok((CommandKind::ScriptDebug, vec![flag.to_str().into()])) }) .await?; @@ -146,7 +149,7 @@ pub async fn evalsha( hash: Str, keys: MultipleKeys, cmd_args: MultipleValues, -) -> Result { +) -> Result { let keys = keys.inner(); let custom_key_slot = check_key_slot(client.inner(), &keys)?; @@ -163,7 +166,7 @@ pub async fn evalsha( args.push(arg); } - let mut command: RedisCommand = (RedisCommandKind::EvalSha, args).into(); + let mut command: Command = (CommandKind::EvalSha, args).into(); command.hasher = custom_key_slot.map(ClusterHash::Custom).unwrap_or(ClusterHash::Random); command.can_pipeline = false; Ok(command) @@ -178,7 +181,7 @@ pub async fn eval( script: Str, keys: MultipleKeys, cmd_args: MultipleValues, -) -> Result { +) -> Result { let keys = keys.inner(); let custom_key_slot = check_key_slot(client.inner(), &keys)?; @@ -195,7 +198,7 @@ pub async fn eval( args.push(arg); } - let mut command: RedisCommand = (RedisCommandKind::Eval, args).into(); + let mut command: Command = (CommandKind::Eval, args).into(); 
command.hasher = custom_key_slot.map(ClusterHash::Custom).unwrap_or(ClusterHash::Random); command.can_pipeline = false; Ok(command) @@ -210,7 +213,7 @@ pub async fn fcall( func: Str, keys: MultipleKeys, args: MultipleValues, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let args = args.into_multiple_values(); let mut arguments = Vec::with_capacity(keys.len() + args.len() + 2); @@ -227,7 +230,7 @@ pub async fn fcall( arguments.push(arg); } - let mut command: RedisCommand = (RedisCommandKind::Fcall, arguments).into(); + let mut command: Command = (CommandKind::Fcall, arguments).into(); command.hasher = custom_key_slot.map(ClusterHash::Custom).unwrap_or(ClusterHash::Random); command.can_pipeline = false; Ok(command) @@ -242,7 +245,7 @@ pub async fn fcall_ro( func: Str, keys: MultipleKeys, args: MultipleValues, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let args = args.into_multiple_values(); let mut arguments = Vec::with_capacity(keys.len() + args.len() + 2); @@ -259,7 +262,7 @@ pub async fn fcall_ro( arguments.push(arg); } - let mut command: RedisCommand = (RedisCommandKind::FcallRO, arguments).into(); + let mut command: Command = (CommandKind::FcallRO, arguments).into(); command.hasher = custom_key_slot.map(ClusterHash::Custom).unwrap_or(ClusterHash::Random); command.can_pipeline = false; Ok(command) @@ -269,26 +272,25 @@ pub async fn fcall_ro( protocol_utils::frame_to_results(frame) } -pub async fn function_delete(client: &C, library_name: Str) -> Result { +pub async fn function_delete(client: &C, library_name: Str) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::FunctionDelete, vec![library_name.into()])) + Ok((CommandKind::FunctionDelete, vec![library_name.into()])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn function_delete_cluster(client: &C, library_name: Str) -> Result<(), RedisError> { +pub async fn 
function_delete_cluster(client: &C, library_name: Str) -> Result<(), Error> { if !client.inner().config.server.is_clustered() { return function_delete(client, library_name).await.map(|_| ()); } let (tx, rx) = oneshot_channel(); - let args: Vec = vec![library_name.into()]; - - let response = ResponseKind::new_buffer(tx); - let mut command: RedisCommand = (RedisCommandKind::_FunctionDeleteCluster, args, response).into(); + let args: Vec = vec![library_name.into()]; + let response = ResponseKind::Respond(Some(tx)); + let mut command: Command = (CommandKind::_FunctionDeleteCluster, args, response).into(); let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; @@ -296,7 +298,7 @@ pub async fn function_delete_cluster(client: &C, library_name: St Ok(()) } -pub async fn function_flush(client: &C, r#async: bool) -> Result { +pub async fn function_flush(client: &C, r#async: bool) -> Result { let frame = utils::request_response(client, move || { let args = if r#async { vec![static_val!(ASYNC)] @@ -304,14 +306,14 @@ pub async fn function_flush(client: &C, r#async: bool) -> Result< vec![static_val!(SYNC)] }; - Ok((RedisCommandKind::FunctionFlush, args)) + Ok((CommandKind::FunctionFlush, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn function_flush_cluster(client: &C, r#async: bool) -> Result<(), RedisError> { +pub async fn function_flush_cluster(client: &C, r#async: bool) -> Result<(), Error> { if !client.inner().config.server.is_clustered() { return function_flush(client, r#async).await.map(|_| ()); } @@ -323,17 +325,17 @@ pub async fn function_flush_cluster(client: &C, r#async: bool) -> vec![static_val!(SYNC)] }; - let response = ResponseKind::new_buffer(tx); - let command: RedisCommand = (RedisCommandKind::_FunctionFlushCluster, args, response).into(); + let response = ResponseKind::Respond(Some(tx)); + let command: Command = (CommandKind::_FunctionFlushCluster, args, response).into(); 
client.send_command(command)?; let _ = rx.await??; Ok(()) } -pub async fn function_kill(client: &C) -> Result { +pub async fn function_kill(client: &C) -> Result { let inner = client.inner(); - let command = RedisCommand::new(RedisCommandKind::FunctionKill, vec![]); + let command = Command::new(CommandKind::FunctionKill, vec![]); let frame = utils::backchannel_request_response(inner, command, true).await?; protocol_utils::frame_to_results(frame) @@ -343,7 +345,7 @@ pub async fn function_list( client: &C, library_name: Option, withcode: bool, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(3); @@ -355,14 +357,14 @@ pub async fn function_list( args.push(static_val!(WITHCODE)); } - Ok((RedisCommandKind::FunctionList, args)) + Ok((CommandKind::FunctionList, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn function_load(client: &C, replace: bool, code: Str) -> Result { +pub async fn function_load(client: &C, replace: bool, code: Str) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2); if replace { @@ -370,32 +372,27 @@ pub async fn function_load(client: &C, replace: bool, code: Str) } args.push(code.into()); - Ok((RedisCommandKind::FunctionLoad, args)) + Ok((CommandKind::FunctionLoad, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn function_load_cluster( - client: &C, - replace: bool, - code: Str, -) -> Result { +pub async fn function_load_cluster(client: &C, replace: bool, code: Str) -> Result { if !client.inner().config.server.is_clustered() { return function_load(client, replace, code).await; } let (tx, rx) = oneshot_channel(); - let mut args: Vec = Vec::with_capacity(2); + let mut args: Vec = Vec::with_capacity(2); if replace { args.push(static_val!(REPLACE)); } args.push(code.into()); - let response = ResponseKind::new_buffer(tx); - let mut command: RedisCommand = 
(RedisCommandKind::_FunctionLoadCluster, args, response).into(); - + let response = ResponseKind::Respond(Some(tx)); + let mut command: Command = (CommandKind::_FunctionLoadCluster, args, response).into(); let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; @@ -405,10 +402,7 @@ pub async fn function_load_cluster( if let Some(frame) = data.pop() { protocol_utils::frame_to_results(frame) } else { - Err(RedisError::new( - RedisErrorKind::Protocol, - "Missing library name response frame.", - )) + Err(Error::new(ErrorKind::Protocol, "Missing library name response frame.")) } }, Resp3Frame::SimpleError { data, .. } => Err(protocol_utils::pretty_error(&data)), @@ -416,7 +410,7 @@ pub async fn function_load_cluster( let parsed = str::from_utf8(&data)?; Err(protocol_utils::pretty_error(parsed)) }, - _ => Err(RedisError::new(RedisErrorKind::Protocol, "Invalid response type.")), + _ => Err(Error::new(ErrorKind::Protocol, "Invalid response type.")), } } @@ -424,13 +418,13 @@ pub async fn function_restore( client: &C, serialized: Bytes, policy: FnPolicy, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2); args.push(serialized.into()); args.push(policy.to_str().into()); - Ok((RedisCommandKind::FunctionRestore, args)) + Ok((CommandKind::FunctionRestore, args)) }) .await?; @@ -441,26 +435,25 @@ pub async fn function_restore_cluster( client: &C, serialized: Bytes, policy: FnPolicy, -) -> Result<(), RedisError> { +) -> Result<(), Error> { if !client.inner().config.server.is_clustered() { return function_restore(client, serialized, policy).await.map(|_| ()); } let (tx, rx) = oneshot_channel(); - let args: Vec = vec![serialized.into(), policy.to_str().into()]; - - let response = ResponseKind::new_buffer(tx); - let mut command: RedisCommand = (RedisCommandKind::_FunctionRestoreCluster, args, response).into(); + let args: Vec = vec![serialized.into(), policy.to_str().into()]; 
+ let response = ResponseKind::Respond(Some(tx)); + let mut command: Command = (CommandKind::_FunctionRestoreCluster, args, response).into(); let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; let _ = utils::timeout(rx, timeout_dur).await??; Ok(()) } -pub async fn function_stats(client: &C) -> Result { +pub async fn function_stats(client: &C) -> Result { let inner = client.inner(); - let command = RedisCommand::new(RedisCommandKind::FunctionStats, vec![]); + let command = Command::new(CommandKind::FunctionStats, vec![]); let frame = utils::backchannel_request_response(inner, command, true).await?; protocol_utils::frame_to_results(frame) diff --git a/src/commands/impls/memory.rs b/src/commands/impls/memory.rs index 4ccfe500..5a8be572 100644 --- a/src/commands/impls/memory.rs +++ b/src/commands/impls/memory.rs @@ -1,6 +1,6 @@ use super::*; use crate::{ - protocol::{command::RedisCommandKind, utils as protocol_utils}, + protocol::{command::CommandKind, utils as protocol_utils}, types::*, utils, }; @@ -9,16 +9,12 @@ value_cmd!(memory_doctor, MemoryDoctor); value_cmd!(memory_malloc_stats, MemoryMallocStats); ok_cmd!(memory_purge, MemoryPurge); -pub async fn memory_stats(client: &C) -> Result { - let response = utils::request_response(client, || Ok((RedisCommandKind::MemoryStats, vec![]))).await?; +pub async fn memory_stats(client: &C) -> Result { + let response = utils::request_response(client, || Ok((CommandKind::MemoryStats, vec![]))).await?; protocol_utils::frame_to_results(response) } -pub async fn memory_usage( - client: &C, - key: RedisKey, - samples: Option, -) -> Result { +pub async fn memory_usage(client: &C, key: Key, samples: Option) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(3); args.push(key.into()); @@ -28,7 +24,7 @@ pub async fn memory_usage( args.push(samples.into()); } - Ok((RedisCommandKind::MemoryUsage, args)) + Ok((CommandKind::MemoryUsage, args)) }) 
.await?; diff --git a/src/commands/impls/mod.rs b/src/commands/impls/mod.rs index 39e113b5..5fed6058 100644 --- a/src/commands/impls/mod.rs +++ b/src/commands/impls/mod.rs @@ -2,10 +2,10 @@ #![allow(dead_code)] use crate::{ - error::RedisError, + error::Error, interfaces::ClientLike, - protocol::{command::RedisCommandKind, utils as protocol_utils}, - types::RedisValue, + protocol::{command::CommandKind, utils as protocol_utils}, + types::Value, utils, }; @@ -60,105 +60,80 @@ pub static WITHMATCHLEN: &str = "WITHMATCHLEN"; /// caller. macro_rules! ok_cmd( ($name:ident, $cmd:tt) => { - pub async fn $name(client: &C) -> Result<(), RedisError> { - let frame = crate::utils::request_response(client, || Ok((RedisCommandKind::$cmd, vec![]))).await?; + pub async fn $name(client: &C) -> Result<(), Error> { + let frame = crate::utils::request_response(client, || Ok((CommandKind::$cmd, vec![]))).await?; let response = crate::protocol::utils::frame_to_results(frame)?; crate::protocol::utils::expect_ok(&response) } } ); -// TODO clean this up -/// Macro to generate a command function that takes no arguments and returns a single `RedisValue` to the caller. +/// Macro to generate a command function that takes no arguments and returns a single `Value` to the caller. macro_rules! simple_cmd( ($name:ident, $cmd:tt, $res:ty) => { - pub async fn $name(client: &C) -> Result<$res, RedisError> { - let frame = crate::utils::request_response(client, || Ok((RedisCommandKind::$cmd, vec![]))).await?; + pub async fn $name(client: &C) -> Result<$res, Error> { + let frame = crate::utils::request_response(client, || Ok((CommandKind::$cmd, vec![]))).await?; crate::protocol::utils::frame_to_results(frame) } } ); -/// Macro to generate a command function that takes no arguments and returns a single `RedisValue` to the caller. +/// Macro to generate a command function that takes no arguments and returns a single `Value` to the caller. macro_rules! 
value_cmd( ($name:ident, $cmd:tt) => { - simple_cmd!($name, $cmd, RedisValue); + simple_cmd!($name, $cmd, Value); } ); -/// Macro to generate a command function that takes no arguments and returns a potentially nested `RedisValue` to the +/// Macro to generate a command function that takes no arguments and returns a potentially nested `Value` to the /// caller. macro_rules! values_cmd( ($name:ident, $cmd:tt) => { - pub async fn $name(client: &C) -> Result { - let frame = crate::utils::request_response(client, || Ok((RedisCommandKind::$cmd, vec![]))).await?; + pub async fn $name(client: &C) -> Result { + let frame = crate::utils::request_response(client, || Ok((CommandKind::$cmd, vec![]))).await?; crate::protocol::utils::frame_to_results(frame) } } ); -/// A function that issues a command that only takes one argument and returns a single `RedisValue`. -pub async fn one_arg_value_cmd( - client: &C, - kind: RedisCommandKind, - arg: RedisValue, -) -> Result { +/// A function that issues a command that only takes one argument and returns a single `Value`. +pub async fn one_arg_value_cmd(client: &C, kind: CommandKind, arg: Value) -> Result { let frame = utils::request_response(client, move || Ok((kind, vec![arg]))).await?; protocol_utils::frame_to_results(frame) } -/// A function that issues a command that only takes one argument and returns a potentially nested `RedisValue`. -pub async fn one_arg_values_cmd( - client: &C, - kind: RedisCommandKind, - arg: RedisValue, -) -> Result { +/// A function that issues a command that only takes one argument and returns a potentially nested `Value`. +pub async fn one_arg_values_cmd(client: &C, kind: CommandKind, arg: Value) -> Result { let frame = utils::request_response(client, move || Ok((kind, vec![arg]))).await?; protocol_utils::frame_to_results(frame) } /// A function that issues a command that only takes one argument and expects an OK response - returning `()` to the /// caller. 
-pub async fn one_arg_ok_cmd( - client: &C, - kind: RedisCommandKind, - arg: RedisValue, -) -> Result<(), RedisError> { +pub async fn one_arg_ok_cmd(client: &C, kind: CommandKind, arg: Value) -> Result<(), Error> { let frame = utils::request_response(client, move || Ok((kind, vec![arg]))).await?; let response = protocol_utils::frame_to_results(frame)?; protocol_utils::expect_ok(&response) } -/// A function that issues a command that takes any number of arguments and returns a single `RedisValue` to the +/// A function that issues a command that takes any number of arguments and returns a single `Value` to the /// caller. -pub async fn args_value_cmd( - client: &C, - kind: RedisCommandKind, - args: Vec, -) -> Result { +pub async fn args_value_cmd(client: &C, kind: CommandKind, args: Vec) -> Result { let frame = utils::request_response(client, move || Ok((kind, args))).await?; protocol_utils::frame_to_results(frame) } -/// A function that issues a command that takes any number of arguments and returns a potentially nested `RedisValue` +/// A function that issues a command that takes any number of arguments and returns a potentially nested `Value` /// to the caller. -pub async fn args_values_cmd( - client: &C, - kind: RedisCommandKind, - args: Vec, -) -> Result { +pub async fn args_values_cmd(client: &C, kind: CommandKind, args: Vec) -> Result { let frame = utils::request_response(client, move || Ok((kind, args))).await?; protocol_utils::frame_to_results(frame) } /// A function that issues a command that takes any number of arguments and expects an OK response - returning `()` to /// the caller. 
-pub async fn args_ok_cmd( - client: &C, - kind: RedisCommandKind, - args: Vec, -) -> Result<(), RedisError> { +pub async fn args_ok_cmd(client: &C, kind: CommandKind, args: Vec) -> Result<(), Error> { let frame = utils::request_response(client, move || Ok((kind, args))).await?; let response = protocol_utils::frame_to_results(frame)?; protocol_utils::expect_ok(&response) diff --git a/src/commands/impls/pubsub.rs b/src/commands/impls/pubsub.rs index 29489a16..78fda79b 100644 --- a/src/commands/impls/pubsub.rs +++ b/src/commands/impls/pubsub.rs @@ -1,7 +1,7 @@ use super::*; use crate::{ protocol::{ - command::{RedisCommand, RedisCommandKind}, + command::{Command, CommandKind}, utils as protocol_utils, }, types::*, @@ -10,7 +10,7 @@ use crate::{ use bytes_utils::Str; use redis_protocol::redis_keyslot; -fn cluster_hash_legacy_command(client: &C, command: &mut RedisCommand) { +fn cluster_hash_legacy_command(client: &C, command: &mut Command) { if client.is_clustered() { // send legacy (non-sharded) pubsub commands to the same node in a cluster so that `UNSUBSCRIBE` (without args) // works correctly. otherwise we'd have to send `UNSUBSCRIBE` to every node. 
@@ -19,58 +19,54 @@ fn cluster_hash_legacy_command(client: &C, command: &mut RedisCom } } -pub async fn subscribe(client: &C, channels: MultipleStrings) -> Result<(), RedisError> { +pub async fn subscribe(client: &C, channels: MultipleStrings) -> Result<(), Error> { let args = channels.inner().into_iter().map(|c| c.into()).collect(); - let mut command = RedisCommand::new(RedisCommandKind::Subscribe, args); + let mut command = Command::new(CommandKind::Subscribe, args); cluster_hash_legacy_command(client, &mut command); let frame = utils::request_response(client, move || Ok(command)).await?; protocol_utils::frame_to_results(frame).map(|_| ()) } -pub async fn unsubscribe(client: &C, channels: MultipleStrings) -> Result<(), RedisError> { +pub async fn unsubscribe(client: &C, channels: MultipleStrings) -> Result<(), Error> { let args = channels.inner().into_iter().map(|c| c.into()).collect(); - let mut command = RedisCommand::new(RedisCommandKind::Unsubscribe, args); + let mut command = Command::new(CommandKind::Unsubscribe, args); cluster_hash_legacy_command(client, &mut command); let frame = utils::request_response(client, move || Ok(command)).await?; protocol_utils::frame_to_results(frame).map(|_| ()) } -pub async fn publish(client: &C, channel: Str, message: RedisValue) -> Result { +pub async fn publish(client: &C, channel: Str, message: Value) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Publish, vec![channel.into(), message])) + Ok((CommandKind::Publish, vec![channel.into(), message])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn psubscribe(client: &C, patterns: MultipleStrings) -> Result<(), RedisError> { +pub async fn psubscribe(client: &C, patterns: MultipleStrings) -> Result<(), Error> { let args = patterns.inner().into_iter().map(|c| c.into()).collect(); - let mut command = RedisCommand::new(RedisCommandKind::Psubscribe, args); + let mut command = Command::new(CommandKind::Psubscribe, args); 
cluster_hash_legacy_command(client, &mut command); let frame = utils::request_response(client, move || Ok(command)).await?; protocol_utils::frame_to_results(frame).map(|_| ()) } -pub async fn punsubscribe(client: &C, patterns: MultipleStrings) -> Result<(), RedisError> { +pub async fn punsubscribe(client: &C, patterns: MultipleStrings) -> Result<(), Error> { let args = patterns.inner().into_iter().map(|c| c.into()).collect(); - let mut command = RedisCommand::new(RedisCommandKind::Punsubscribe, args); + let mut command = Command::new(CommandKind::Punsubscribe, args); cluster_hash_legacy_command(client, &mut command); let frame = utils::request_response(client, move || Ok(command)).await?; protocol_utils::frame_to_results(frame).map(|_| ()) } -pub async fn spublish( - client: &C, - channel: Str, - message: RedisValue, -) -> Result { +pub async fn spublish(client: &C, channel: Str, message: Value) -> Result { let frame = utils::request_response(client, move || { - let mut command: RedisCommand = (RedisCommandKind::Spublish, vec![channel.into(), message]).into(); + let mut command: Command = (CommandKind::Spublish, vec![channel.into(), message]).into(); command.hasher = ClusterHash::FirstKey; Ok(command) @@ -80,25 +76,25 @@ pub async fn spublish( protocol_utils::frame_to_results(frame) } -pub async fn ssubscribe(client: &C, channels: MultipleStrings) -> Result<(), RedisError> { +pub async fn ssubscribe(client: &C, channels: MultipleStrings) -> Result<(), Error> { let args = channels.inner().into_iter().map(|c| c.into()).collect(); - let mut command = RedisCommand::new(RedisCommandKind::Ssubscribe, args); + let mut command = Command::new(CommandKind::Ssubscribe, args); command.hasher = ClusterHash::FirstKey; let frame = utils::request_response(client, move || Ok(command)).await?; protocol_utils::frame_to_results(frame).map(|_| ()) } -pub async fn sunsubscribe(client: &C, channels: MultipleStrings) -> Result<(), RedisError> { +pub async fn sunsubscribe(client: &C, 
channels: MultipleStrings) -> Result<(), Error> { let args = channels.inner().into_iter().map(|c| c.into()).collect(); - let mut command = RedisCommand::new(RedisCommandKind::Sunsubscribe, args); + let mut command = Command::new(CommandKind::Sunsubscribe, args); command.hasher = ClusterHash::FirstKey; let frame = utils::request_response(client, move || Ok(command)).await?; protocol_utils::frame_to_results(frame).map(|_| ()) } -pub async fn pubsub_channels(client: &C, pattern: Str) -> Result { +pub async fn pubsub_channels(client: &C, pattern: Str) -> Result { let frame = utils::request_response(client, || { let args = if pattern.is_empty() { vec![] @@ -106,7 +102,7 @@ pub async fn pubsub_channels(client: &C, pattern: Str) -> Result< vec![pattern.into()] }; - let mut command: RedisCommand = RedisCommand::new(RedisCommandKind::PubsubChannels, args); + let mut command: Command = Command::new(CommandKind::PubsubChannels, args); cluster_hash_legacy_command(client, &mut command); Ok(command) @@ -116,9 +112,9 @@ pub async fn pubsub_channels(client: &C, pattern: Str) -> Result< protocol_utils::frame_to_results(frame) } -pub async fn pubsub_numpat(client: &C) -> Result { +pub async fn pubsub_numpat(client: &C) -> Result { let frame = utils::request_response(client, || { - let mut command: RedisCommand = RedisCommand::new(RedisCommandKind::PubsubNumpat, vec![]); + let mut command: Command = Command::new(CommandKind::PubsubNumpat, vec![]); cluster_hash_legacy_command(client, &mut command); Ok(command) @@ -128,10 +124,10 @@ pub async fn pubsub_numpat(client: &C) -> Result(client: &C, channels: MultipleStrings) -> Result { +pub async fn pubsub_numsub(client: &C, channels: MultipleStrings) -> Result { let frame = utils::request_response(client, || { - let args: Vec = channels.inner().into_iter().map(|s| s.into()).collect(); - let mut command: RedisCommand = RedisCommand::new(RedisCommandKind::PubsubNumsub, args); + let args: Vec = channels.inner().into_iter().map(|s| 
s.into()).collect(); + let mut command: Command = Command::new(CommandKind::PubsubNumsub, args); cluster_hash_legacy_command(client, &mut command); Ok(command) @@ -141,23 +137,18 @@ pub async fn pubsub_numsub(client: &C, channels: MultipleStrings) protocol_utils::frame_to_results(frame) } -pub async fn pubsub_shardchannels(client: &C, pattern: Str) -> Result { - let frame = utils::request_response(client, || { - Ok((RedisCommandKind::PubsubShardchannels, vec![pattern.into()])) - }) - .await?; +pub async fn pubsub_shardchannels(client: &C, pattern: Str) -> Result { + let frame = + utils::request_response(client, || Ok((CommandKind::PubsubShardchannels, vec![pattern.into()]))).await?; protocol_utils::frame_to_results(frame) } -pub async fn pubsub_shardnumsub( - client: &C, - channels: MultipleStrings, -) -> Result { +pub async fn pubsub_shardnumsub(client: &C, channels: MultipleStrings) -> Result { let frame = utils::request_response(client, || { - let args: Vec = channels.inner().into_iter().map(|s| s.into()).collect(); + let args: Vec = channels.inner().into_iter().map(|s| s.into()).collect(); let has_args = !args.is_empty(); - let mut command: RedisCommand = RedisCommand::new(RedisCommandKind::PubsubShardnumsub, args); + let mut command: Command = Command::new(CommandKind::PubsubShardnumsub, args); if !has_args { cluster_hash_legacy_command(client, &mut command); } diff --git a/src/commands/impls/redis_json.rs b/src/commands/impls/redis_json.rs index e0ca7999..c42934c2 100644 --- a/src/commands/impls/redis_json.rs +++ b/src/commands/impls/redis_json.rs @@ -1,18 +1,17 @@ use crate::{ - error::{RedisError, RedisErrorKind}, - interfaces::{ClientLike, RedisResult}, - protocol::{command::RedisCommandKind, utils as protocol_utils}, - types::{MultipleKeys, MultipleStrings, RedisKey, RedisValue, SetOptions}, + error::{Error, ErrorKind}, + interfaces::{ClientLike, FredResult}, + protocol::{command::CommandKind, utils as protocol_utils}, + types::{Key, MultipleKeys, 
MultipleStrings, SetOptions, Value}, utils, }; use bytes_utils::Str; -use serde_json::Value; const INDENT: &str = "INDENT"; const NEWLINE: &str = "NEWLINE"; const SPACE: &str = "SPACE"; -fn key_path_args(key: RedisKey, path: Option, extra: usize) -> Vec { +fn key_path_args(key: Key, path: Option, extra: usize) -> Vec { let mut out = Vec::with_capacity(2 + extra); out.push(key.into()); if let Some(path) = path { @@ -22,51 +21,48 @@ fn key_path_args(key: RedisKey, path: Option, extra: usize) -> Vec Result { +fn value_to_bulk_str(value: &serde_json::Value) -> Result { Ok(match value { - Value::String(ref s) => RedisValue::String(Str::from(s)), - _ => RedisValue::String(Str::from(serde_json::to_string(value)?)), + serde_json::Value::String(ref s) => Value::String(Str::from(s)), + _ => Value::String(Str::from(serde_json::to_string(value)?)), }) } -/// Convert the provided json value to a redis value directly without serializing into a string. This only works with +/// Convert the provided json value to a `Value` directly without serializing into a string. This only works with /// scalar values. 
-fn json_to_redis(value: Value) -> Result { +fn json_to_value(value: serde_json::Value) -> Result { let out = match value { - Value::String(s) => Some(RedisValue::String(Str::from(s))), - Value::Null => Some(RedisValue::Null), - Value::Number(n) => { + serde_json::Value::String(s) => Some(Value::String(Str::from(s))), + serde_json::Value::Null => Some(Value::Null), + serde_json::Value::Number(n) => { if n.is_f64() { - n.as_f64().map(RedisValue::Double) + n.as_f64().map(Value::Double) } else { - n.as_i64().map(RedisValue::Integer) + n.as_i64().map(Value::Integer) } }, - Value::Bool(b) => Some(RedisValue::Boolean(b)), + serde_json::Value::Bool(b) => Some(Value::Boolean(b)), _ => None, }; - out.ok_or(RedisError::new( - RedisErrorKind::InvalidArgument, - "Expected string or number.", - )) + out.ok_or(Error::new(ErrorKind::InvalidArgument, "Expected string or number.")) } -fn values_to_bulk(values: &[Value]) -> Result, RedisError> { +fn values_to_bulk(values: &[serde_json::Value]) -> Result, Error> { values.iter().map(value_to_bulk_str).collect() } pub async fn json_arrappend( client: &C, - key: RedisKey, + key: Key, path: Str, - values: Vec, -) -> RedisResult { + values: Vec, +) -> FredResult { let frame = utils::request_response(client, || { let mut args = key_path_args(key, Some(path), values.len()); args.extend(values_to_bulk(&values)?); - Ok((RedisCommandKind::JsonArrAppend, args)) + Ok((CommandKind::JsonArrAppend, args)) }) .await?; protocol_utils::frame_to_results(frame) @@ -74,12 +70,12 @@ pub async fn json_arrappend( pub async fn json_arrindex( client: &C, - key: RedisKey, + key: Key, path: Str, - value: Value, + value: serde_json::Value, start: Option, stop: Option, -) -> RedisResult { +) -> FredResult { let frame = utils::request_response(client, || { let mut args = Vec::with_capacity(5); args.extend([key.into(), path.into(), value_to_bulk_str(&value)?]); @@ -90,7 +86,7 @@ pub async fn json_arrindex( args.push(stop.into()); } - 
Ok((RedisCommandKind::JsonArrIndex, args)) + Ok((CommandKind::JsonArrIndex, args)) }) .await?; protocol_utils::frame_to_results(frame) @@ -98,43 +94,40 @@ pub async fn json_arrindex( pub async fn json_arrinsert( client: &C, - key: RedisKey, + key: Key, path: Str, index: i64, - values: Vec, -) -> RedisResult { + values: Vec, +) -> FredResult { let frame = utils::request_response(client, || { let mut args = Vec::with_capacity(3 + values.len()); args.extend([key.into(), path.into(), index.into()]); args.extend(values_to_bulk(&values)?); - Ok((RedisCommandKind::JsonArrInsert, args)) + Ok((CommandKind::JsonArrInsert, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn json_arrlen(client: &C, key: RedisKey, path: Option) -> RedisResult { - let frame = utils::request_response(client, || { - Ok((RedisCommandKind::JsonArrLen, key_path_args(key, path, 0))) - }) - .await?; +pub async fn json_arrlen(client: &C, key: Key, path: Option) -> FredResult { + let frame = utils::request_response(client, || Ok((CommandKind::JsonArrLen, key_path_args(key, path, 0)))).await?; protocol_utils::frame_to_results(frame) } pub async fn json_arrpop( client: &C, - key: RedisKey, + key: Key, path: Option, index: Option, -) -> RedisResult { +) -> FredResult { let frame = utils::request_response(client, || { let mut args = key_path_args(key, path, 1); if let Some(index) = index { args.push(index.into()); } - Ok((RedisCommandKind::JsonArrPop, args)) + Ok((CommandKind::JsonArrPop, args)) }) .await?; protocol_utils::frame_to_results(frame) @@ -142,13 +135,13 @@ pub async fn json_arrpop( pub async fn json_arrtrim( client: &C, - key: RedisKey, + key: Key, path: Str, start: i64, stop: i64, -) -> RedisResult { +) -> FredResult { let frame = utils::request_response(client, || { - Ok((RedisCommandKind::JsonArrTrim, vec![ + Ok((CommandKind::JsonArrTrim, vec![ key.into(), path.into(), start.into(), @@ -159,42 +152,33 @@ pub async fn json_arrtrim( protocol_utils::frame_to_results(frame) 
} -pub async fn json_clear(client: &C, key: RedisKey, path: Option) -> RedisResult { - let frame = utils::request_response(client, || { - Ok((RedisCommandKind::JsonClear, key_path_args(key, path, 0))) - }) - .await?; +pub async fn json_clear(client: &C, key: Key, path: Option) -> FredResult { + let frame = utils::request_response(client, || Ok((CommandKind::JsonClear, key_path_args(key, path, 0)))).await?; protocol_utils::frame_to_results(frame) } -pub async fn json_debug_memory( - client: &C, - key: RedisKey, - path: Option, -) -> RedisResult { +pub async fn json_debug_memory(client: &C, key: Key, path: Option) -> FredResult { let frame = utils::request_response(client, || { - Ok((RedisCommandKind::JsonDebugMemory, key_path_args(key, path, 0))) + Ok((CommandKind::JsonDebugMemory, key_path_args(key, path, 0))) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn json_del(client: &C, key: RedisKey, path: Str) -> RedisResult { - let frame = utils::request_response(client, || { - Ok((RedisCommandKind::JsonDel, key_path_args(key, Some(path), 0))) - }) - .await?; +pub async fn json_del(client: &C, key: Key, path: Str) -> FredResult { + let frame = + utils::request_response(client, || Ok((CommandKind::JsonDel, key_path_args(key, Some(path), 0)))).await?; protocol_utils::frame_to_results(frame) } pub async fn json_get( client: &C, - key: RedisKey, + key: Key, indent: Option, newline: Option, space: Option, paths: MultipleStrings, -) -> RedisResult { +) -> FredResult { let frame = utils::request_response(client, || { let mut args = Vec::with_capacity(7 + paths.len()); args.push(key.into()); @@ -212,7 +196,7 @@ pub async fn json_get( } args.extend(paths.into_values()); - Ok((RedisCommandKind::JsonGet, args)) + Ok((CommandKind::JsonGet, args)) }) .await?; protocol_utils::frame_to_results(frame) @@ -220,12 +204,12 @@ pub async fn json_get( pub async fn json_merge( client: &C, - key: RedisKey, + key: Key, path: Str, - value: Value, -) -> RedisResult { + value: 
serde_json::Value, +) -> FredResult { let frame = utils::request_response(client, || { - Ok((RedisCommandKind::JsonMerge, vec![ + Ok((CommandKind::JsonMerge, vec![ key.into(), path.into(), value_to_bulk_str(&value)?, @@ -235,26 +219,26 @@ pub async fn json_merge( protocol_utils::frame_to_results(frame) } -pub async fn json_mget(client: &C, keys: MultipleKeys, path: Str) -> RedisResult { +pub async fn json_mget(client: &C, keys: MultipleKeys, path: Str) -> FredResult { let frame = utils::request_response(client, || { let mut args = Vec::with_capacity(keys.len() + 1); args.extend(keys.into_values()); args.push(path.into()); - Ok((RedisCommandKind::JsonMGet, args)) + Ok((CommandKind::JsonMGet, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn json_mset(client: &C, values: Vec<(RedisKey, Str, Value)>) -> RedisResult { +pub async fn json_mset(client: &C, values: Vec<(Key, Str, serde_json::Value)>) -> FredResult { let frame = utils::request_response(client, || { let mut args = Vec::with_capacity(values.len() * 3); for (key, path, value) in values.into_iter() { args.extend([key.into(), path.into(), value_to_bulk_str(&value)?]); } - Ok((RedisCommandKind::JsonMSet, args)) + Ok((CommandKind::JsonMSet, args)) }) .await?; protocol_utils::frame_to_results(frame) @@ -262,50 +246,43 @@ pub async fn json_mset(client: &C, values: Vec<(RedisKey, Str, Va pub async fn json_numincrby( client: &C, - key: RedisKey, + key: Key, path: Str, - value: Value, -) -> RedisResult { + value: serde_json::Value, +) -> FredResult { let frame = utils::request_response(client, || { - Ok((RedisCommandKind::JsonNumIncrBy, vec![ + Ok((CommandKind::JsonNumIncrBy, vec![ key.into(), path.into(), - json_to_redis(value)?, + json_to_value(value)?, ])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn json_objkeys(client: &C, key: RedisKey, path: Option) -> RedisResult { - let frame = utils::request_response(client, || { - Ok((RedisCommandKind::JsonObjKeys, 
key_path_args(key, path, 0))) - }) - .await?; +pub async fn json_objkeys(client: &C, key: Key, path: Option) -> FredResult { + let frame = utils::request_response(client, || Ok((CommandKind::JsonObjKeys, key_path_args(key, path, 0)))).await?; protocol_utils::frame_to_results(frame) } -pub async fn json_objlen(client: &C, key: RedisKey, path: Option) -> RedisResult { - let frame = utils::request_response(client, || { - Ok((RedisCommandKind::JsonObjLen, key_path_args(key, path, 0))) - }) - .await?; +pub async fn json_objlen(client: &C, key: Key, path: Option) -> FredResult { + let frame = utils::request_response(client, || Ok((CommandKind::JsonObjLen, key_path_args(key, path, 0)))).await?; protocol_utils::frame_to_results(frame) } -pub async fn json_resp(client: &C, key: RedisKey, path: Option) -> RedisResult { - let frame = - utils::request_response(client, || Ok((RedisCommandKind::JsonResp, key_path_args(key, path, 0)))).await?; +pub async fn json_resp(client: &C, key: Key, path: Option) -> FredResult { + let frame = utils::request_response(client, || Ok((CommandKind::JsonResp, key_path_args(key, path, 0)))).await?; protocol_utils::frame_to_results(frame) } pub async fn json_set( client: &C, - key: RedisKey, + key: Key, path: Str, - value: Value, + value: serde_json::Value, options: Option, -) -> RedisResult { +) -> FredResult { let frame = utils::request_response(client, || { let mut args = key_path_args(key, Some(path), 2); args.push(value_to_bulk_str(&value)?); @@ -313,7 +290,7 @@ pub async fn json_set( args.push(options.to_str().into()); } - Ok((RedisCommandKind::JsonSet, args)) + Ok((CommandKind::JsonSet, args)) }) .await?; protocol_utils::frame_to_results(frame) @@ -321,38 +298,34 @@ pub async fn json_set( pub async fn json_strappend( client: &C, - key: RedisKey, + key: Key, path: Option, - value: Value, -) -> RedisResult { + value: serde_json::Value, +) -> FredResult { let frame = utils::request_response(client, || { let mut args = key_path_args(key, path, 
1); args.push(value_to_bulk_str(&value)?); - Ok((RedisCommandKind::JsonStrAppend, args)) + Ok((CommandKind::JsonStrAppend, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn json_strlen(client: &C, key: RedisKey, path: Option) -> RedisResult { - let frame = utils::request_response(client, || { - Ok((RedisCommandKind::JsonStrLen, key_path_args(key, path, 0))) - }) - .await?; +pub async fn json_strlen(client: &C, key: Key, path: Option) -> FredResult { + let frame = utils::request_response(client, || Ok((CommandKind::JsonStrLen, key_path_args(key, path, 0)))).await?; protocol_utils::frame_to_results(frame) } -pub async fn json_toggle(client: &C, key: RedisKey, path: Str) -> RedisResult { +pub async fn json_toggle(client: &C, key: Key, path: Str) -> FredResult { let frame = utils::request_response(client, || { - Ok((RedisCommandKind::JsonToggle, key_path_args(key, Some(path), 0))) + Ok((CommandKind::JsonToggle, key_path_args(key, Some(path), 0))) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn json_type(client: &C, key: RedisKey, path: Option) -> RedisResult { - let frame = - utils::request_response(client, || Ok((RedisCommandKind::JsonType, key_path_args(key, path, 0)))).await?; +pub async fn json_type(client: &C, key: Key, path: Option) -> FredResult { + let frame = utils::request_response(client, || Ok((CommandKind::JsonType, key_path_args(key, path, 0)))).await?; protocol_utils::frame_to_results(frame) } diff --git a/src/commands/impls/redisearch.rs b/src/commands/impls/redisearch.rs index 680a25f4..2f4270b8 100644 --- a/src/commands/impls/redisearch.rs +++ b/src/commands/impls/redisearch.rs @@ -1,21 +1,23 @@ use crate::{ commands::{args_values_cmd, one_arg_values_cmd, COUNT, LEN, LIMIT}, - error::RedisError, + error::Error, interfaces::ClientLike, - protocol::{command::RedisCommandKind, utils as protocol_utils}, + protocol::{command::CommandKind, utils as protocol_utils}, types::{ - AggregateOperation, - 
FtAggregateOptions, - FtAlterOptions, - FtCreateOptions, - FtSearchOptions, - Load, + redisearch::{ + AggregateOperation, + FtAggregateOptions, + FtAlterOptions, + FtCreateOptions, + FtSearchOptions, + Load, + SearchSchema, + SearchSchemaKind, + SpellcheckTerms, + }, + Key, MultipleStrings, - RedisKey, - RedisValue, - SearchSchema, - SearchSchemaKind, - SpellcheckTerms, + Value, }, utils, }; @@ -99,7 +101,7 @@ static GEO: &str = "GEO"; static VECTOR: &str = "VECTOR"; static GEOSHAPE: &str = "GEOSHAPE"; -fn gen_aggregate_op(args: &mut Vec, operation: AggregateOperation) -> Result<(), RedisError> { +fn gen_aggregate_op(args: &mut Vec, operation: AggregateOperation) -> Result<(), Error> { match operation { AggregateOperation::Filter { expression } => { args.extend([static_val!(FILTER), expression.into()]); @@ -140,7 +142,7 @@ fn gen_aggregate_op(args: &mut Vec, operation: AggregateOperation) - Ok(()) } -fn gen_aggregate_options(args: &mut Vec, options: FtAggregateOptions) -> Result<(), RedisError> { +fn gen_aggregate_options(args: &mut Vec, options: FtAggregateOptions) -> Result<(), Error> { if options.verbatim { args.push(static_val!(VERBATIM)); } @@ -190,7 +192,7 @@ fn gen_aggregate_options(args: &mut Vec, options: FtAggregateOptions Ok(()) } -fn gen_search_options(args: &mut Vec, options: FtSearchOptions) -> Result<(), RedisError> { +fn gen_search_options(args: &mut Vec, options: FtSearchOptions) -> Result<(), Error> { if options.nocontent { args.push(static_val!(NOCONTENT)); } @@ -300,7 +302,7 @@ fn gen_search_options(args: &mut Vec, options: FtSearchOptions) -> R args.push(static_val!(EXPLAINSCORE)); } if let Some(payload) = options.payload { - args.extend([static_val!(PAYLOAD), RedisValue::Bytes(payload)]); + args.extend([static_val!(PAYLOAD), Value::Bytes(payload)]); } if let Some(sort) = options.sortby { args.push(static_val!(SORTBY)); @@ -329,7 +331,7 @@ fn gen_search_options(args: &mut Vec, options: FtSearchOptions) -> R Ok(()) } -fn gen_schema_kind(args: 
&mut Vec, kind: SearchSchemaKind) -> Result<(), RedisError> { +fn gen_schema_kind(args: &mut Vec, kind: SearchSchemaKind) -> Result<(), Error> { match kind { SearchSchemaKind::Custom { name, arguments } => { args.push(name.into()); @@ -436,7 +438,7 @@ fn gen_schema_kind(args: &mut Vec, kind: SearchSchemaKind) -> Result Ok(()) } -fn gen_alter_options(args: &mut Vec, options: FtAlterOptions) -> Result<(), RedisError> { +fn gen_alter_options(args: &mut Vec, options: FtAlterOptions) -> Result<(), Error> { if options.skipinitialscan { args.push(static_val!(SKIPINITIALSCAN)); } @@ -446,7 +448,7 @@ fn gen_alter_options(args: &mut Vec, options: FtAlterOptions) -> Res Ok(()) } -fn gen_create_options(args: &mut Vec, options: FtCreateOptions) -> Result<(), RedisError> { +fn gen_create_options(args: &mut Vec, options: FtCreateOptions) -> Result<(), Error> { if let Some(kind) = options.on { args.extend([static_val!(ON), kind.to_str().into()]); } @@ -502,7 +504,7 @@ fn gen_create_options(args: &mut Vec, options: FtCreateOptions) -> R } // does not include the prefix SCHEMA -fn gen_schema_args(args: &mut Vec, options: SearchSchema) -> Result<(), RedisError> { +fn gen_schema_args(args: &mut Vec, options: SearchSchema) -> Result<(), Error> { args.push(options.field_name.into()); if let Some(alias) = options.alias { args.extend([static_val!(AS), alias.into()]); @@ -512,8 +514,8 @@ fn gen_schema_args(args: &mut Vec, options: SearchSchema) -> Result< Ok(()) } -pub async fn ft_list(client: &C) -> Result { - args_values_cmd(client, RedisCommandKind::FtList, vec![]).await +pub async fn ft_list(client: &C) -> Result { + args_values_cmd(client, CommandKind::FtList, vec![]).await } pub async fn ft_aggregate( @@ -521,14 +523,14 @@ pub async fn ft_aggregate( index: Str, query: Str, options: FtAggregateOptions, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2 + options.num_args()); args.push(index.into()); 
args.push(query.into()); gen_aggregate_options(&mut args, options)?; - Ok((RedisCommandKind::FtAggregate, args)) + Ok((CommandKind::FtAggregate, args)) }) .await?; @@ -540,13 +542,13 @@ pub async fn ft_search( index: Str, query: Str, options: FtSearchOptions, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2 + options.num_args()); args.extend([index.into(), query.into()]); gen_search_options(&mut args, options)?; - Ok((RedisCommandKind::FtSearch, args)) + Ok((CommandKind::FtSearch, args)) }) .await?; @@ -558,7 +560,7 @@ pub async fn ft_create( index: Str, options: FtCreateOptions, schema: Vec, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let schema_num_args = schema.iter().fold(0, |m, s| m + s.num_args()); let mut args = Vec::with_capacity(2 + options.num_args() + schema_num_args); @@ -570,86 +572,66 @@ pub async fn ft_create( gen_schema_args(&mut args, schema)?; } - Ok((RedisCommandKind::FtCreate, args)) + Ok((CommandKind::FtCreate, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn ft_alter( - client: &C, - index: Str, - options: FtAlterOptions, -) -> Result { +pub async fn ft_alter(client: &C, index: Str, options: FtAlterOptions) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1 + options.num_args()); args.push(index.into()); gen_alter_options(&mut args, options)?; - Ok((RedisCommandKind::FtAlter, args)) + Ok((CommandKind::FtAlter, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn ft_aliasadd(client: &C, alias: Str, index: Str) -> Result { - args_values_cmd(client, RedisCommandKind::FtAliasAdd, vec![alias.into(), index.into()]).await +pub async fn ft_aliasadd(client: &C, alias: Str, index: Str) -> Result { + args_values_cmd(client, CommandKind::FtAliasAdd, vec![alias.into(), index.into()]).await } -pub async fn ft_aliasdel(client: &C, alias: Str) -> Result 
{ - args_values_cmd(client, RedisCommandKind::FtAliasDel, vec![alias.into()]).await +pub async fn ft_aliasdel(client: &C, alias: Str) -> Result { + args_values_cmd(client, CommandKind::FtAliasDel, vec![alias.into()]).await } -pub async fn ft_aliasupdate(client: &C, alias: Str, index: Str) -> Result { - args_values_cmd(client, RedisCommandKind::FtAliasUpdate, vec![ - alias.into(), - index.into(), - ]) - .await +pub async fn ft_aliasupdate(client: &C, alias: Str, index: Str) -> Result { + args_values_cmd(client, CommandKind::FtAliasUpdate, vec![alias.into(), index.into()]).await } -pub async fn ft_config_get(client: &C, option: Str) -> Result { - args_values_cmd(client, RedisCommandKind::FtConfigGet, vec![option.into()]).await +pub async fn ft_config_get(client: &C, option: Str) -> Result { + args_values_cmd(client, CommandKind::FtConfigGet, vec![option.into()]).await } -pub async fn ft_config_set( - client: &C, - option: Str, - value: RedisValue, -) -> Result { - args_values_cmd(client, RedisCommandKind::FtConfigSet, vec![option.into(), value]).await +pub async fn ft_config_set(client: &C, option: Str, value: Value) -> Result { + args_values_cmd(client, CommandKind::FtConfigSet, vec![option.into(), value]).await } -pub async fn ft_cursor_del( - client: &C, - index: Str, - cursor: RedisValue, -) -> Result { - args_values_cmd(client, RedisCommandKind::FtCursorDel, vec![index.into(), cursor]).await +pub async fn ft_cursor_del(client: &C, index: Str, cursor: Value) -> Result { + args_values_cmd(client, CommandKind::FtCursorDel, vec![index.into(), cursor]).await } pub async fn ft_cursor_read( client: &C, index: Str, - cursor: RedisValue, + cursor: Value, count: Option, -) -> Result { +) -> Result { let args = if let Some(count) = count { vec![index.into(), cursor, static_val!(COUNT), count.try_into()?] 
} else { vec![index.into(), cursor] }; - args_values_cmd(client, RedisCommandKind::FtCursorRead, args).await + args_values_cmd(client, CommandKind::FtCursorRead, args).await } -pub async fn ft_dictadd( - client: &C, - dict: Str, - terms: MultipleStrings, -) -> Result { +pub async fn ft_dictadd(client: &C, dict: Str, terms: MultipleStrings) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(terms.len() + 1); args.push(dict.into()); @@ -657,18 +639,14 @@ pub async fn ft_dictadd( args.push(term.into()); } - Ok((RedisCommandKind::FtDictAdd, args)) + Ok((CommandKind::FtDictAdd, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn ft_dictdel( - client: &C, - dict: Str, - terms: MultipleStrings, -) -> Result { +pub async fn ft_dictdel(client: &C, dict: Str, terms: MultipleStrings) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(terms.len() + 1); args.push(dict.into()); @@ -676,25 +654,25 @@ pub async fn ft_dictdel( args.push(term.into()); } - Ok((RedisCommandKind::FtDictDel, args)) + Ok((CommandKind::FtDictDel, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn ft_dictdump(client: &C, dict: Str) -> Result { - one_arg_values_cmd(client, RedisCommandKind::FtDictDump, dict.into()).await +pub async fn ft_dictdump(client: &C, dict: Str) -> Result { + one_arg_values_cmd(client, CommandKind::FtDictDump, dict.into()).await } -pub async fn ft_dropindex(client: &C, index: Str, dd: bool) -> Result { +pub async fn ft_dropindex(client: &C, index: Str, dd: bool) -> Result { let args = if dd { vec![index.into(), static_val!(DD)] } else { vec![index.into()] }; - args_values_cmd(client, RedisCommandKind::FtDropIndex, args).await + args_values_cmd(client, CommandKind::FtDropIndex, args).await } pub async fn ft_explain( @@ -702,18 +680,18 @@ pub async fn ft_explain( index: Str, query: Str, dialect: Option, -) -> Result { +) -> Result { let args = 
if let Some(dialect) = dialect { vec![index.into(), query.into(), static_val!(DIALECT), dialect.into()] } else { vec![index.into(), query.into()] }; - args_values_cmd(client, RedisCommandKind::FtExplain, args).await + args_values_cmd(client, CommandKind::FtExplain, args).await } -pub async fn ft_info(client: &C, index: Str) -> Result { - one_arg_values_cmd(client, RedisCommandKind::FtInfo, index.into()).await +pub async fn ft_info(client: &C, index: Str) -> Result { + one_arg_values_cmd(client, CommandKind::FtInfo, index.into()).await } pub async fn ft_spellcheck( @@ -723,7 +701,7 @@ pub async fn ft_spellcheck( distance: Option, terms: Option, dialect: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let terms_len = terms.as_ref().map(|t| t.num_args()).unwrap_or(0); let mut args = Vec::with_capacity(9 + terms_len); @@ -732,7 +710,7 @@ pub async fn ft_spellcheck( if let Some(distance) = distance { args.push(static_val!(DISTANCE)); - args.push(distance.into()); + args.push((distance as i64).into()); } if let Some(terms) = terms { args.push(static_val!(TERMS)); @@ -756,7 +734,7 @@ pub async fn ft_spellcheck( args.extend([static_val!(DIALECT), dialect.into()]); } - Ok((RedisCommandKind::FtSpellCheck, args)) + Ok((CommandKind::FtSpellCheck, args)) }) .await?; @@ -765,12 +743,12 @@ pub async fn ft_spellcheck( pub async fn ft_sugadd( client: &C, - key: RedisKey, + key: Key, string: Str, score: f64, incr: bool, payload: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(6); args.extend([key.into(), string.into(), score.try_into()?]); @@ -779,29 +757,29 @@ pub async fn ft_sugadd( args.push(static_val!(INCR)); } if let Some(payload) = payload { - args.extend([static_val!(PAYLOAD), RedisValue::Bytes(payload)]); + args.extend([static_val!(PAYLOAD), Value::Bytes(payload)]); } - Ok((RedisCommandKind::FtSugAdd, args)) + Ok((CommandKind::FtSugAdd, args)) }) 
.await?; protocol_utils::frame_to_results(frame) } -pub async fn ft_sugdel(client: &C, key: RedisKey, string: Str) -> Result { - args_values_cmd(client, RedisCommandKind::FtSugDel, vec![key.into(), string.into()]).await +pub async fn ft_sugdel(client: &C, key: Key, string: Str) -> Result { + args_values_cmd(client, CommandKind::FtSugDel, vec![key.into(), string.into()]).await } pub async fn ft_sugget( client: &C, - key: RedisKey, + key: Key, prefix: Str, fuzzy: bool, withscores: bool, withpayloads: bool, max: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(7); args.push(key.into()); @@ -819,19 +797,19 @@ pub async fn ft_sugget( args.extend([static_val!(MAX), max.try_into()?]); } - Ok((RedisCommandKind::FtSugGet, args)) + Ok((CommandKind::FtSugGet, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn ft_suglen(client: &C, key: RedisKey) -> Result { - one_arg_values_cmd(client, RedisCommandKind::FtSugLen, key.into()).await +pub async fn ft_suglen(client: &C, key: Key) -> Result { + one_arg_values_cmd(client, CommandKind::FtSugLen, key.into()).await } -pub async fn ft_syndump(client: &C, index: Str) -> Result { - one_arg_values_cmd(client, RedisCommandKind::FtSynDump, index.into()).await +pub async fn ft_syndump(client: &C, index: Str) -> Result { + one_arg_values_cmd(client, CommandKind::FtSynDump, index.into()).await } pub async fn ft_synupdate( @@ -840,7 +818,7 @@ pub async fn ft_synupdate( synonym_group_id: Str, skipinitialscan: bool, terms: MultipleStrings, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(3 + terms.len()); args.push(index.into()); @@ -852,17 +830,13 @@ pub async fn ft_synupdate( args.push(term.into()); } - Ok((RedisCommandKind::FtSynUpdate, args)) + Ok((CommandKind::FtSynUpdate, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn ft_tagvals(client: &C, 
index: Str, field_name: Str) -> Result { - args_values_cmd(client, RedisCommandKind::FtTagVals, vec![ - index.into(), - field_name.into(), - ]) - .await +pub async fn ft_tagvals(client: &C, index: Str, field_name: Str) -> Result { + args_values_cmd(client, CommandKind::FtTagVals, vec![index.into(), field_name.into()]).await } diff --git a/src/commands/impls/scan.rs b/src/commands/impls/scan.rs index 347bccfd..8dfa7d6e 100644 --- a/src/commands/impls/scan.rs +++ b/src/commands/impls/scan.rs @@ -2,25 +2,22 @@ use super::*; use crate::{ error::*, interfaces, - modules::inner::RedisClientInner, + modules::inner::ClientInner, protocol::{ - command::{RedisCommand, RedisCommandKind}, + command::{Command, CommandKind}, responders::ResponseKind, types::*, }, - runtime::{rx_stream, unbounded_channel, RefCount}, - types::*, + runtime::{channel, RefCount}, + types::{scan::*, ClusterHash, Key, Value}, utils, }; use bytes_utils::Str; use futures::stream::{Stream, TryStreamExt}; -#[cfg(feature = "glommio")] -use crate::runtime::UnboundedSender; - static STARTING_CURSOR: &str = "0"; -fn values_args(key: RedisKey, pattern: Str, count: Option) -> Vec { +fn values_args(key: Key, pattern: Str, count: Option) -> Vec { let mut args = Vec::with_capacity(6); args.push(key.into()); args.push(static_val!(STARTING_CURSOR)); @@ -35,8 +32,14 @@ fn values_args(key: RedisKey, pattern: Str, count: Option) -> Vec, pattern: Str, count: Option, r#type: Option) { - args.push(static_val!(STARTING_CURSOR)); +fn create_scan_args( + args: &mut Vec, + pattern: Str, + count: Option, + r#type: Option, + cursor: Option, +) { + args.push(cursor.unwrap_or_else(|| static_val!(STARTING_CURSOR))); args.push(static_val!(MATCH)); args.push(pattern.into()); @@ -50,27 +53,37 @@ fn create_scan_args(args: &mut Vec, pattern: Str, count: Option } } +fn pattern_hash_slot(inner: &RefCount, pattern: &str) -> Option { + if inner.config.server.is_clustered() { + if utils::clustered_scan_pattern_has_hash_tag(inner, pattern) { 
+ Some(redis_protocol::redis_keyslot(pattern.as_bytes())) + } else { + None + } + } else { + None + } +} + pub fn scan_cluster( - inner: &RefCount, + inner: &RefCount, pattern: Str, count: Option, r#type: Option, -) -> impl Stream> { - let (tx, rx) = unbounded_channel(); - #[cfg(feature = "glommio")] - let tx: UnboundedSender<_> = tx.into(); +) -> impl Stream> { + let (tx, rx) = channel(0); let hash_slots = inner.with_cluster_state(|state| Ok(state.unique_hash_slots())); let hash_slots = match hash_slots { Ok(slots) => slots, Err(e) => { - let _ = tx.send(Err(e)); - return rx_stream(rx); + let _ = tx.try_send(Err(e)); + return rx.into_stream(); }, }; let mut args = Vec::with_capacity(7); - create_scan_args(&mut args, pattern, count, r#type); + create_scan_args(&mut args, pattern, count, r#type, None); for slot in hash_slots.into_iter() { _trace!(inner, "Scan cluster hash slot server: {}", slot); let response = ResponseKind::KeyScan(KeyScanInner { @@ -80,38 +93,36 @@ pub fn scan_cluster( tx: tx.clone(), server: None, }); - let command: RedisCommand = (RedisCommandKind::Scan, Vec::new(), response).into(); + let command: Command = (CommandKind::Scan, Vec::new(), response).into(); if let Err(e) = interfaces::default_send_command(inner, command) { - let _ = tx.send(Err(e)); + let _ = tx.try_send(Err(e)); break; } } - rx_stream(rx) + rx.into_stream() } pub fn scan_cluster_buffered( - inner: &RefCount, + inner: &RefCount, pattern: Str, count: Option, r#type: Option, -) -> impl Stream> { - let (tx, rx) = unbounded_channel(); - #[cfg(feature = "glommio")] - let tx: UnboundedSender<_> = tx.into(); +) -> impl Stream> { + let (tx, rx) = channel(0); let hash_slots = inner.with_cluster_state(|state| Ok(state.unique_hash_slots())); let hash_slots = match hash_slots { Ok(slots) => slots, Err(e) => { - let _ = tx.send(Err(e)); - return rx_stream(rx); + let _ = tx.try_send(Err(e)); + return rx.into_stream(); }, }; let mut args = Vec::with_capacity(7); - create_scan_args(&mut args, 
pattern, count, r#type); + create_scan_args(&mut args, pattern, count, r#type, None); for slot in hash_slots.into_iter() { _trace!(inner, "Scan cluster buffered hash slot server: {}", slot); let response = ResponseKind::KeyScanBuffered(KeyScanBufferedInner { @@ -121,79 +132,85 @@ pub fn scan_cluster_buffered( tx: tx.clone(), server: None, }); - let command: RedisCommand = (RedisCommandKind::Scan, Vec::new(), response).into(); + let command: Command = (CommandKind::Scan, Vec::new(), response).into(); if let Err(e) = interfaces::default_send_command(inner, command) { - let _ = tx.send(Err(e)); + let _ = tx.try_send(Err(e)); break; } } - rx_stream(rx) + rx.into_stream() } -pub fn scan( - inner: &RefCount, +pub async fn scan_page( + client: &C, + cursor: Str, pattern: Str, count: Option, r#type: Option, server: Option, -) -> impl Stream> { - let (tx, rx) = unbounded_channel(); - #[cfg(feature = "glommio")] - let tx: UnboundedSender<_> = tx.into(); - - let hash_slot = if inner.config.server.is_clustered() { - if utils::clustered_scan_pattern_has_hash_tag(inner, &pattern) { - Some(redis_protocol::redis_keyslot(pattern.as_bytes())) - } else { - None + cluster_hash: Option, +) -> Result { + let frame = utils::request_response(client, move || { + let hash_slot = pattern_hash_slot(client.inner(), &pattern); + let mut args = Vec::with_capacity(7); + create_scan_args(&mut args, pattern, count, r#type, Some(cursor.into())); + + let mut command = Command::new(CommandKind::Scan, args); + if let Some(server) = server { + command.cluster_node = Some(server); + } else if let Some(hasher) = cluster_hash { + command.hasher = hasher; + } else if let Some(slot) = hash_slot { + command.hasher = ClusterHash::Custom(slot); } - } else { - None - }; + Ok(command) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub fn scan( + inner: &RefCount, + pattern: Str, + count: Option, + r#type: Option, +) -> impl Stream> { + let (tx, rx) = channel(0); + let hash_slot = 
pattern_hash_slot(inner, &pattern); let mut args = Vec::with_capacity(7); - create_scan_args(&mut args, pattern, count, r#type); + create_scan_args(&mut args, pattern, count, r#type, None); let response = ResponseKind::KeyScan(KeyScanInner { hash_slot, args, - server, + server: None, cursor_idx: 0, tx: tx.clone(), }); - let command: RedisCommand = (RedisCommandKind::Scan, Vec::new(), response).into(); + let command: Command = (CommandKind::Scan, Vec::new(), response).into(); if let Err(e) = interfaces::default_send_command(inner, command) { - let _ = tx.send(Err(e)); + let _ = tx.try_send(Err(e)); } - rx_stream(rx) + rx.into_stream() } pub fn scan_buffered( - inner: &RefCount, + inner: &RefCount, pattern: Str, count: Option, r#type: Option, server: Option, -) -> impl Stream> { - let (tx, rx) = unbounded_channel(); - #[cfg(feature = "glommio")] - let tx: UnboundedSender<_> = tx.into(); - - let hash_slot = if inner.config.server.is_clustered() { - if utils::clustered_scan_pattern_has_hash_tag(inner, &pattern) { - Some(redis_protocol::redis_keyslot(pattern.as_bytes())) - } else { - None - } - } else { - None - }; +) -> impl Stream> { + let (tx, rx) = channel(0); + let hash_slot = pattern_hash_slot(inner, &pattern); let mut args = Vec::with_capacity(7); - create_scan_args(&mut args, pattern, count, r#type); + create_scan_args(&mut args, pattern, count, r#type, None); let response = ResponseKind::KeyScanBuffered(KeyScanBufferedInner { hash_slot, args, @@ -201,101 +218,95 @@ pub fn scan_buffered( cursor_idx: 0, tx: tx.clone(), }); - let command: RedisCommand = (RedisCommandKind::Scan, Vec::new(), response).into(); + let command: Command = (CommandKind::Scan, Vec::new(), response).into(); if let Err(e) = interfaces::default_send_command(inner, command) { - let _ = tx.send(Err(e)); + let _ = tx.try_send(Err(e)); } - rx_stream(rx) + rx.into_stream() } pub fn hscan( - inner: &RefCount, - key: RedisKey, + inner: &RefCount, + key: Key, pattern: Str, count: Option, -) -> impl 
Stream> { - let (tx, rx) = unbounded_channel(); +) -> impl Stream> { + let (tx, rx) = channel(0); let args = values_args(key, pattern, count); - #[cfg(feature = "glommio")] - let tx: UnboundedSender<_> = tx.into(); let response = ResponseKind::ValueScan(ValueScanInner { tx: tx.clone(), cursor_idx: 1, args, }); - let command: RedisCommand = (RedisCommandKind::Hscan, Vec::new(), response).into(); - + let command: Command = (CommandKind::Hscan, Vec::new(), response).into(); if let Err(e) = interfaces::default_send_command(inner, command) { - let _ = tx.send(Err(e)); + let _ = tx.try_send(Err(e)); } - rx_stream(rx).try_filter_map(|result| async move { + rx.into_stream().try_filter_map(|result| async move { match result { ValueScanResult::HScan(res) => Ok(Some(res)), - _ => Err(RedisError::new(RedisErrorKind::Protocol, "Expected HSCAN result.")), + _ => Err(Error::new(ErrorKind::Protocol, "Expected HSCAN result.")), } }) } pub fn sscan( - inner: &RefCount, - key: RedisKey, + inner: &RefCount, + key: Key, pattern: Str, count: Option, -) -> impl Stream> { - let (tx, rx) = unbounded_channel(); +) -> impl Stream> { + let (tx, rx) = channel(0); let args = values_args(key, pattern, count); - #[cfg(feature = "glommio")] - let tx: UnboundedSender<_> = tx.into(); let response = ResponseKind::ValueScan(ValueScanInner { tx: tx.clone(), cursor_idx: 1, args, }); - let command: RedisCommand = (RedisCommandKind::Sscan, Vec::new(), response).into(); + let command: Command = (CommandKind::Sscan, Vec::new(), response).into(); if let Err(e) = interfaces::default_send_command(inner, command) { - let _ = tx.send(Err(e)); + let _ = tx.try_send(Err(e)); } - rx_stream(rx).try_filter_map(|result| async move { + rx.into_stream().try_filter_map(|result| async move { match result { ValueScanResult::SScan(res) => Ok(Some(res)), - _ => Err(RedisError::new(RedisErrorKind::Protocol, "Expected SSCAN result.")), + _ => Err(Error::new(ErrorKind::Protocol, "Expected SSCAN result.")), } }) } pub fn zscan( 
- inner: &RefCount, - key: RedisKey, + inner: &RefCount, + key: Key, pattern: Str, count: Option, -) -> impl Stream> { - let (tx, rx) = unbounded_channel(); +) -> impl Stream> { + let inner = inner.clone(); + let (tx, rx) = channel(0); let args = values_args(key, pattern, count); - #[cfg(feature = "glommio")] - let tx: UnboundedSender<_> = tx.into(); let response = ResponseKind::ValueScan(ValueScanInner { tx: tx.clone(), cursor_idx: 1, args, }); - let command: RedisCommand = (RedisCommandKind::Zscan, Vec::new(), response).into(); + let command: Command = (CommandKind::Zscan, Vec::new(), response).into(); - if let Err(e) = interfaces::default_send_command(inner, command) { - let _ = tx.send(Err(e)); + if let Err(e) = interfaces::default_send_command(&inner, command) { + let _ = tx.try_send(Err(e)); } - rx_stream(rx).try_filter_map(|result| async move { + rx.into_stream().try_filter_map(|result| async move { match result { ValueScanResult::ZScan(res) => Ok(Some(res)), - _ => Err(RedisError::new(RedisErrorKind::Protocol, "Expected ZSCAN result.")), + _ => Err(Error::new(ErrorKind::Protocol, "Expected ZSCAN result.")), } }) } diff --git a/src/commands/impls/sentinel.rs b/src/commands/impls/sentinel.rs index 2b08e951..dba6017b 100644 --- a/src/commands/impls/sentinel.rs +++ b/src/commands/impls/sentinel.rs @@ -1,7 +1,7 @@ use super::*; use crate::{ - error::RedisError, - protocol::{command::RedisCommandKind, utils as protocol_utils}, + error::Error, + protocol::{command::CommandKind, utils as protocol_utils}, router::sentinel::{ CKQUORUM, CONFIG, @@ -26,19 +26,19 @@ use crate::{ use bytes_utils::Str; use std::net::IpAddr; -pub async fn config_get(client: &C, name: Str) -> Result { +pub async fn config_get(client: &C, name: Str) -> Result { let frame = utils::request_response(client, move || { let args = vec![static_val!(CONFIG), static_val!(GET), name.into()]; - Ok((RedisCommandKind::Sentinel, args)) + Ok((CommandKind::Sentinel, args)) }) .await?; 
protocol_utils::frame_to_results(frame) } -pub async fn config_set(client: &C, name: Str, value: RedisValue) -> Result { +pub async fn config_set(client: &C, name: Str, value: Value) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Sentinel, vec![ + Ok((CommandKind::Sentinel, vec![ static_val!(CONFIG), static_val!(SET), name.into(), @@ -50,31 +50,31 @@ pub async fn config_set(client: &C, name: Str, value: RedisValue) protocol_utils::frame_to_results(frame) } -pub async fn ckquorum(client: &C, name: Str) -> Result { +pub async fn ckquorum(client: &C, name: Str) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Sentinel, vec![static_val!(CKQUORUM), name.into()])) + Ok((CommandKind::Sentinel, vec![static_val!(CKQUORUM), name.into()])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn flushconfig(client: &C) -> Result { - args_values_cmd(client, RedisCommandKind::Sentinel, vec![static_val!(FLUSHCONFIG)]).await +pub async fn flushconfig(client: &C) -> Result { + args_values_cmd(client, CommandKind::Sentinel, vec![static_val!(FLUSHCONFIG)]).await } -pub async fn failover(client: &C, name: Str) -> Result { +pub async fn failover(client: &C, name: Str) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Sentinel, vec![static_val!(FAILOVER), name.into()])) + Ok((CommandKind::Sentinel, vec![static_val!(FAILOVER), name.into()])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn get_master_addr_by_name(client: &C, name: Str) -> Result { +pub async fn get_master_addr_by_name(client: &C, name: Str) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Sentinel, vec![ + Ok((CommandKind::Sentinel, vec![ static_val!(GET_MASTER_ADDR_BY_NAME), name.into(), ])) @@ -84,17 +84,17 @@ pub async fn get_master_addr_by_name(client: &C, name: Str) -> Re protocol_utils::frame_to_results(frame) } -pub async 
fn info_cache(client: &C) -> Result { - args_values_cmd(client, RedisCommandKind::Sentinel, vec![static_val!(INFO_CACHE)]).await +pub async fn info_cache(client: &C) -> Result { + args_values_cmd(client, CommandKind::Sentinel, vec![static_val!(INFO_CACHE)]).await } -pub async fn masters(client: &C) -> Result { - args_values_cmd(client, RedisCommandKind::Sentinel, vec![static_val!(MASTERS)]).await +pub async fn masters(client: &C) -> Result { + args_values_cmd(client, CommandKind::Sentinel, vec![static_val!(MASTERS)]).await } -pub async fn master(client: &C, name: Str) -> Result { +pub async fn master(client: &C, name: Str) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Sentinel, vec![static_val!(MASTER), name.into()])) + Ok((CommandKind::Sentinel, vec![static_val!(MASTER), name.into()])) }) .await?; @@ -107,10 +107,10 @@ pub async fn monitor( ip: IpAddr, port: u16, quorum: u32, -) -> Result { +) -> Result { let ip = ip.to_string(); let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Sentinel, vec![ + Ok((CommandKind::Sentinel, vec![ static_val!(MONITOR), name.into(), ip.into(), @@ -123,42 +123,42 @@ pub async fn monitor( protocol_utils::frame_to_results(frame) } -pub async fn myid(client: &C) -> Result { - args_values_cmd(client, RedisCommandKind::Sentinel, vec![static_val!(MYID)]).await +pub async fn myid(client: &C) -> Result { + args_values_cmd(client, CommandKind::Sentinel, vec![static_val!(MYID)]).await } -pub async fn pending_scripts(client: &C) -> Result { - args_values_cmd(client, RedisCommandKind::Sentinel, vec![static_val!(PENDING_SCRIPTS)]).await +pub async fn pending_scripts(client: &C) -> Result { + args_values_cmd(client, CommandKind::Sentinel, vec![static_val!(PENDING_SCRIPTS)]).await } -pub async fn remove(client: &C, name: Str) -> Result { +pub async fn remove(client: &C, name: Str) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Sentinel, 
vec![static_val!(REMOVE), name.into()])) + Ok((CommandKind::Sentinel, vec![static_val!(REMOVE), name.into()])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn replicas(client: &C, name: Str) -> Result { +pub async fn replicas(client: &C, name: Str) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Sentinel, vec![static_val!(REPLICAS), name.into()])) + Ok((CommandKind::Sentinel, vec![static_val!(REPLICAS), name.into()])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn sentinels(client: &C, name: Str) -> Result { +pub async fn sentinels(client: &C, name: Str) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Sentinel, vec![static_val!(SENTINELS), name.into()])) + Ok((CommandKind::Sentinel, vec![static_val!(SENTINELS), name.into()])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn set(client: &C, name: Str, options: RedisMap) -> Result { +pub async fn set(client: &C, name: Str, options: Map) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2 + options.len()); args.push(static_val!(SET)); @@ -168,19 +168,16 @@ pub async fn set(client: &C, name: Str, options: RedisMap) -> Res args.push(key.into()); args.push(value); } - Ok((RedisCommandKind::Sentinel, args)) + Ok((CommandKind::Sentinel, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn simulate_failure( - client: &C, - kind: SentinelFailureKind, -) -> Result { +pub async fn simulate_failure(client: &C, kind: SentinelFailureKind) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Sentinel, vec![ + Ok((CommandKind::Sentinel, vec![ static_val!(SIMULATE_FAILURE), kind.to_str().into(), ])) @@ -190,9 +187,9 @@ pub async fn simulate_failure( protocol_utils::frame_to_results(frame) } -pub async fn reset(client: &C, pattern: Str) -> Result { +pub async fn reset(client: &C, 
pattern: Str) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Sentinel, vec![static_val!(RESET), pattern.into()])) + Ok((CommandKind::Sentinel, vec![static_val!(RESET), pattern.into()])) }) .await?; diff --git a/src/commands/impls/server.rs b/src/commands/impls/server.rs index 849be25f..662d4966 100644 --- a/src/commands/impls/server.rs +++ b/src/commands/impls/server.rs @@ -1,12 +1,12 @@ use super::*; use crate::{ - clients::RedisClient, + clients::Client, error::*, interfaces, - modules::inner::RedisClientInner, - prelude::Resp3Frame, + modules::inner::ClientInner, + prelude::{Resp3Frame, ServerConfig}, protocol::{ - command::{RedisCommand, RedisCommandKind, RouterCommand}, + command::{Command, CommandKind, RouterCommand}, responders::ResponseKind, utils as protocol_utils, }, @@ -16,41 +16,28 @@ use crate::{ }; use bytes_utils::Str; -pub async fn active_connections(client: &C) -> Result, RedisError> { - let (tx, rx) = oneshot_channel(); - let command = RouterCommand::Connections { tx }; - interfaces::send_to_router(client.inner(), command)?; - - rx.await.map_err(|e| e.into()) -} - -pub async fn quit(client: &C) -> Result<(), RedisError> { +pub async fn quit(client: &C) -> Result<(), Error> { let inner = client.inner().clone(); _debug!(inner, "Closing Redis connection with Quit command."); let (tx, rx) = oneshot_channel(); - let mut command: RedisCommand = if inner.config.server.is_clustered() { - let response = ResponseKind::new_buffer(tx); - (RedisCommandKind::Quit, vec![], response).into() - } else { - let response = ResponseKind::Respond(Some(tx)); - (RedisCommandKind::Quit, vec![], response).into() - }; - utils::set_client_state(&inner.state, ClientState::Disconnecting); - inner.notifications.broadcast_close(); + let response = ResponseKind::Respond(Some(tx)); + let mut command: Command = (CommandKind::Quit, vec![], response).into(); + inner.set_client_state(ClientState::Disconnecting); + 
inner.notifications.broadcast_close(); let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; let _ = utils::timeout(rx, timeout_dur).await??; inner .notifications .close_public_receivers(inner.with_perf_config(|c| c.broadcast_channel_capacity)); - inner.backchannel.write().await.check_and_disconnect(&inner, None).await; + inner.backchannel.check_and_disconnect(&inner, None).await; Ok(()) } -pub async fn shutdown(client: &C, flags: Option) -> Result<(), RedisError> { +pub async fn shutdown(client: &C, flags: Option) -> Result<(), Error> { let inner = client.inner().clone(); _debug!(inner, "Shutting down server."); @@ -60,14 +47,14 @@ pub async fn shutdown(client: &C, flags: Option) - Vec::new() }; let (tx, rx) = oneshot_channel(); - let mut command: RedisCommand = if inner.config.server.is_clustered() { + let mut command: Command = if inner.config.server.is_clustered() { let response = ResponseKind::new_buffer(tx); - (RedisCommandKind::Shutdown, args, response).into() + (CommandKind::Shutdown, args, response).into() } else { let response = ResponseKind::Respond(Some(tx)); - (RedisCommandKind::Shutdown, args, response).into() + (CommandKind::Shutdown, args, response).into() }; - utils::set_client_state(&inner.state, ClientState::Disconnecting); + inner.set_client_state(ClientState::Disconnecting); inner.notifications.broadcast_close(); let timeout_dur = utils::prepare_command(client, &mut command); @@ -76,18 +63,15 @@ pub async fn shutdown(client: &C, flags: Option) - inner .notifications .close_public_receivers(inner.with_perf_config(|c| c.broadcast_channel_capacity)); - inner.backchannel.write().await.check_and_disconnect(&inner, None).await; + inner.backchannel.check_and_disconnect(&inner, None).await; Ok(()) } /// Create a new client struct for each unique primary cluster node based on the cached cluster state. 
-pub fn split(inner: &RefCount) -> Result, RedisError> { +pub fn split(inner: &RefCount) -> Result, Error> { if !inner.config.server.is_clustered() { - return Err(RedisError::new( - RedisErrorKind::Config, - "Expected clustered redis deployment.", - )); + return Err(Error::new(ErrorKind::Config, "Expected clustered redis deployment.")); } let servers = inner.with_cluster_state(|state| Ok(state.unique_primary_nodes()))?; _debug!(inner, "Unique primary nodes in split: {:?}", servers); @@ -102,13 +86,13 @@ pub fn split(inner: &RefCount) -> Result, Red let policy = inner.reconnect_policy(); let connection = inner.connection_config(); - RedisClient::new(config, Some(perf), Some(connection), policy) + Client::new(config, Some(perf), Some(connection), policy) }) .collect(), ) } -pub async fn force_reconnection(inner: &RefCount) -> Result<(), RedisError> { +pub async fn force_reconnection(inner: &RefCount) -> Result<(), Error> { let (tx, rx) = oneshot_channel(); let command = RouterCommand::Reconnect { server: None, @@ -122,21 +106,21 @@ pub async fn force_reconnection(inner: &RefCount) -> Result<() rx.await?.map(|_| ()) } -pub async fn flushall(client: &C, r#async: bool) -> Result { +pub async fn flushall(client: &C, r#async: bool) -> Result { let args = if r#async { vec![static_val!(ASYNC)] } else { Vec::new() }; - let frame = utils::request_response(client, move || Ok((RedisCommandKind::FlushAll, args))).await?; + let frame = utils::request_response(client, move || Ok((CommandKind::FlushAll, args))).await?; protocol_utils::frame_to_results(frame) } -pub async fn flushall_cluster(client: &C) -> Result<(), RedisError> { +pub async fn flushall_cluster(client: &C) -> Result<(), Error> { if !client.inner().config.server.is_clustered() { return flushall(client, false).await.map(|_| ()); } let (tx, rx) = oneshot_channel(); - let response = ResponseKind::new_buffer(tx); - let mut command: RedisCommand = (RedisCommandKind::_FlushAllCluster, vec![], response).into(); + let 
response = ResponseKind::Respond(Some(tx)); + let mut command: Command = (CommandKind::_FlushAllCluster, vec![], response).into(); let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; @@ -144,24 +128,29 @@ pub async fn flushall_cluster(client: &C) -> Result<(), RedisErro Ok(()) } -pub async fn ping(client: &C) -> Result { - let frame = utils::request_response(client, || Ok((RedisCommandKind::Ping, vec![]))).await?; +pub async fn ping(client: &C, message: Option) -> Result { + let mut args = Vec::with_capacity(1); + if let Some(message) = message { + args.push(message.into()); + } + + let frame = utils::request_response(client, || Ok((CommandKind::Ping, args))).await?; protocol_utils::frame_to_results(frame) } -pub async fn select(client: &C, db: u8) -> Result { - let frame = utils::request_response(client, || Ok((RedisCommandKind::Select, vec![(db as i64).into()]))).await?; +pub async fn select(client: &C, index: Value) -> Result { + let frame = utils::request_response(client, || Ok((CommandKind::Select, vec![index]))).await?; protocol_utils::frame_to_results(frame) } -pub async fn info(client: &C, section: Option) -> Result { +pub async fn info(client: &C, section: Option) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1); if let Some(section) = section { args.push(section.to_str().into()); } - Ok((RedisCommandKind::Info, args)) + Ok((CommandKind::Info, args)) }) .await?; @@ -173,7 +162,7 @@ pub async fn hello( version: RespVersion, auth: Option<(Str, Str)>, setname: Option, -) -> Result<(), RedisError> { +) -> Result<(), Error> { let mut args = if let Some((username, password)) = auth { vec![username.into(), password.into()] } else { @@ -185,21 +174,21 @@ pub async fn hello( if client.inner().config.server.is_clustered() { let (tx, rx) = oneshot_channel(); - let mut command: RedisCommand = RedisCommandKind::_HelloAllCluster(version).into(); - command.response = 
ResponseKind::new_buffer(tx); + let mut command: Command = CommandKind::_HelloAllCluster(version).into(); + command.response = ResponseKind::Respond(Some(tx)); let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; let _ = utils::timeout(rx, timeout_dur).await??; Ok(()) } else { - let frame = utils::request_response(client, move || Ok((RedisCommandKind::_Hello(version), args))).await?; + let frame = utils::request_response(client, move || Ok((CommandKind::_Hello(version), args))).await?; let _ = protocol_utils::frame_to_results(frame)?; Ok(()) } } -pub async fn auth(client: &C, username: Option, password: Str) -> Result<(), RedisError> { +pub async fn auth(client: &C, username: Option, password: Str) -> Result<(), Error> { let mut args = Vec::with_capacity(2); if let Some(username) = username { args.push(username.into()); @@ -208,35 +197,31 @@ pub async fn auth(client: &C, username: Option, password: if client.inner().config.server.is_clustered() { let (tx, rx) = oneshot_channel(); - let response = ResponseKind::new_buffer(tx); - let mut command: RedisCommand = (RedisCommandKind::_AuthAllCluster, args, response).into(); + let response = ResponseKind::Respond(Some(tx)); + let mut command: Command = (CommandKind::_AuthAllCluster, args, response).into(); let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; let _ = utils::timeout(rx, timeout_dur).await??; Ok(()) } else { - let frame = utils::request_response(client, move || Ok((RedisCommandKind::Auth, args))).await?; + let frame = utils::request_response(client, move || Ok((CommandKind::Auth, args))).await?; let response = protocol_utils::frame_to_results(frame)?; protocol_utils::expect_ok(&response) } } -pub async fn custom( - client: &C, - cmd: CustomCommand, - args: Vec, -) -> Result { - args_values_cmd(client, RedisCommandKind::_Custom(cmd), args).await +pub async fn custom(client: &C, cmd: CustomCommand, args: Vec) -> Result { + 
args_values_cmd(client, CommandKind::_Custom(cmd), args).await } pub async fn custom_raw( client: &C, cmd: CustomCommand, - args: Vec, -) -> Result { - utils::request_response(client, move || Ok((RedisCommandKind::_Custom(cmd), args))).await + args: Vec, +) -> Result { + utils::request_response(client, move || Ok((CommandKind::_Custom(cmd), args))).await } #[cfg(feature = "i-server")] @@ -253,7 +238,7 @@ pub async fn failover( force: bool, abort: bool, timeout: Option, -) -> Result<(), RedisError> { +) -> Result<(), Error> { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(7); if let Some((host, port)) = to { @@ -272,7 +257,7 @@ pub async fn failover( args.push(timeout.into()); } - Ok((RedisCommandKind::Failover, args)) + Ok((CommandKind::Failover, args)) }) .await?; @@ -284,9 +269,9 @@ pub async fn failover( value_cmd!(lastsave, LastSave); #[cfg(feature = "i-server")] -pub async fn wait(client: &C, numreplicas: i64, timeout: i64) -> Result { +pub async fn wait(client: &C, numreplicas: i64, timeout: i64) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::Wait, vec![numreplicas.into(), timeout.into()])) + Ok((CommandKind::Wait, vec![numreplicas.into(), timeout.into()])) }) .await?; diff --git a/src/commands/impls/sets.rs b/src/commands/impls/sets.rs index 5ae47e82..2bab7d65 100644 --- a/src/commands/impls/sets.rs +++ b/src/commands/impls/sets.rs @@ -1,16 +1,12 @@ use super::*; use crate::{ - protocol::{command::RedisCommandKind, utils as protocol_utils}, + protocol::{command::CommandKind, utils as protocol_utils}, types::*, utils, }; use std::convert::TryInto; -pub async fn sadd( - client: &C, - key: RedisKey, - members: MultipleValues, -) -> Result { +pub async fn sadd(client: &C, key: Key, members: MultipleValues) -> Result { let frame = utils::request_response(client, move || { let members = members.into_multiple_values(); let mut args = Vec::with_capacity(1 + members.len()); @@ -19,36 
+15,32 @@ pub async fn sadd( for member in members.into_iter() { args.push(member); } - Ok((RedisCommandKind::Sadd, args)) + Ok((CommandKind::Sadd, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn scard(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::Scard, key.into()).await +pub async fn scard(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::Scard, key.into()).await } -pub async fn sdiff(client: &C, keys: MultipleKeys) -> Result { +pub async fn sdiff(client: &C, keys: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(keys.len()); for key in keys.inner().into_iter() { args.push(key.into()); } - Ok((RedisCommandKind::Sdiff, args)) + Ok((CommandKind::Sdiff, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn sdiffstore( - client: &C, - dest: RedisKey, - keys: MultipleKeys, -) -> Result { +pub async fn sdiffstore(client: &C, dest: Key, keys: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1 + keys.len()); args.push(dest.into()); @@ -56,32 +48,28 @@ pub async fn sdiffstore( for key in keys.inner().into_iter() { args.push(key.into()); } - Ok((RedisCommandKind::Sdiffstore, args)) + Ok((CommandKind::Sdiffstore, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn sinter(client: &C, keys: MultipleKeys) -> Result { +pub async fn sinter(client: &C, keys: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(keys.len()); for key in keys.inner().into_iter() { args.push(key.into()); } - Ok((RedisCommandKind::Sinter, args)) + Ok((CommandKind::Sinter, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn sinterstore( - client: &C, - dest: RedisKey, - keys: MultipleKeys, -) -> Result { +pub async fn sinterstore(client: &C, dest: Key, keys: 
MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1 + keys.len()); args.push(dest.into()); @@ -89,26 +77,18 @@ pub async fn sinterstore( for key in keys.inner().into_iter() { args.push(key.into()); } - Ok((RedisCommandKind::Sinterstore, args)) + Ok((CommandKind::Sinterstore, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn sismember( - client: &C, - key: RedisKey, - member: RedisValue, -) -> Result { - args_value_cmd(client, RedisCommandKind::Sismember, vec![key.into(), member]).await +pub async fn sismember(client: &C, key: Key, member: Value) -> Result { + args_value_cmd(client, CommandKind::Sismember, vec![key.into(), member]).await } -pub async fn smismember( - client: &C, - key: RedisKey, - members: MultipleValues, -) -> Result { +pub async fn smismember(client: &C, key: Key, members: MultipleValues) -> Result { let frame = utils::request_response(client, move || { let members = members.into_multiple_values(); let mut args = Vec::with_capacity(1 + members.len()); @@ -117,28 +97,23 @@ pub async fn smismember( for member in members.into_iter() { args.push(member); } - Ok((RedisCommandKind::Smismember, args)) + Ok((CommandKind::Smismember, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn smembers(client: &C, key: RedisKey) -> Result { - one_arg_values_cmd(client, RedisCommandKind::Smembers, key.into()).await +pub async fn smembers(client: &C, key: Key) -> Result { + one_arg_values_cmd(client, CommandKind::Smembers, key.into()).await } -pub async fn smove( - client: &C, - source: RedisKey, - dest: RedisKey, - member: RedisValue, -) -> Result { +pub async fn smove(client: &C, source: Key, dest: Key, member: Value) -> Result { let args = vec![source.into(), dest.into(), member]; - args_value_cmd(client, RedisCommandKind::Smove, args).await + args_value_cmd(client, CommandKind::Smove, args).await } -pub async fn spop(client: &C, key: RedisKey, count: 
Option) -> Result { +pub async fn spop(client: &C, key: Key, count: Option) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2); args.push(key.into()); @@ -146,18 +121,14 @@ pub async fn spop(client: &C, key: RedisKey, count: Option if let Some(count) = count { args.push(count.try_into()?); } - Ok((RedisCommandKind::Spop, args)) + Ok((CommandKind::Spop, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn srandmember( - client: &C, - key: RedisKey, - count: Option, -) -> Result { +pub async fn srandmember(client: &C, key: Key, count: Option) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2); args.push(key.into()); @@ -165,18 +136,14 @@ pub async fn srandmember( if let Some(count) = count { args.push(count.try_into()?); } - Ok((RedisCommandKind::Srandmember, args)) + Ok((CommandKind::Srandmember, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn srem( - client: &C, - key: RedisKey, - members: MultipleValues, -) -> Result { +pub async fn srem(client: &C, key: Key, members: MultipleValues) -> Result { let frame = utils::request_response(client, move || { let members = members.into_multiple_values(); let mut args = Vec::with_capacity(1 + members.len()); @@ -185,32 +152,28 @@ pub async fn srem( for member in members.into_iter() { args.push(member); } - Ok((RedisCommandKind::Srem, args)) + Ok((CommandKind::Srem, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn sunion(client: &C, keys: MultipleKeys) -> Result { +pub async fn sunion(client: &C, keys: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(keys.len()); for key in keys.inner().into_iter() { args.push(key.into()); } - Ok((RedisCommandKind::Sunion, args)) + Ok((CommandKind::Sunion, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn sunionstore( - client: 
&C, - dest: RedisKey, - keys: MultipleKeys, -) -> Result { +pub async fn sunionstore(client: &C, dest: Key, keys: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1 + keys.len()); args.push(dest.into()); @@ -218,7 +181,7 @@ pub async fn sunionstore( for key in keys.inner().into_iter() { args.push(key.into()); } - Ok((RedisCommandKind::Sunionstore, args)) + Ok((CommandKind::Sunionstore, args)) }) .await?; diff --git a/src/commands/impls/slowlog.rs b/src/commands/impls/slowlog.rs index 06275636..bae1e1d7 100644 --- a/src/commands/impls/slowlog.rs +++ b/src/commands/impls/slowlog.rs @@ -1,10 +1,10 @@ use super::*; use crate::{ - protocol::{command::RedisCommandKind, utils as protocol_utils}, + protocol::{command::CommandKind, utils as protocol_utils}, utils, }; -pub async fn slowlog_get(client: &C, count: Option) -> Result { +pub async fn slowlog_get(client: &C, count: Option) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2); args.push(static_val!(GET)); @@ -13,18 +13,18 @@ pub async fn slowlog_get(client: &C, count: Option) -> Resul args.push(count.into()); } - Ok((RedisCommandKind::Slowlog, args)) + Ok((CommandKind::Slowlog, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn slowlog_length(client: &C) -> Result { - let frame = utils::request_response(client, || Ok((RedisCommandKind::Slowlog, vec![LEN.into()]))).await?; +pub async fn slowlog_length(client: &C) -> Result { + let frame = utils::request_response(client, || Ok((CommandKind::Slowlog, vec![LEN.into()]))).await?; protocol_utils::frame_to_results(frame) } -pub async fn slowlog_reset(client: &C) -> Result<(), RedisError> { - args_ok_cmd(client, RedisCommandKind::Slowlog, vec![static_val!(RESET)]).await +pub async fn slowlog_reset(client: &C) -> Result<(), Error> { + args_ok_cmd(client, CommandKind::Slowlog, vec![static_val!(RESET)]).await } diff --git 
a/src/commands/impls/sorted_sets.rs b/src/commands/impls/sorted_sets.rs index 8486586e..3b947b19 100644 --- a/src/commands/impls/sorted_sets.rs +++ b/src/commands/impls/sorted_sets.rs @@ -1,32 +1,42 @@ use super::*; use crate::{ error::*, - protocol::{command::RedisCommandKind, utils as protocol_utils}, - types::*, + protocol::{command::CommandKind, utils as protocol_utils}, + types::{ + sorted_sets::{ + AggregateOptions, + MultipleWeights, + MultipleZaddValues, + Ordering, + ZCmp, + ZRange, + ZRangeBound, + ZSort, + }, + *, + }, utils, }; use std::convert::TryInto; static INCR: &str = "INCR"; static WITH_SCORES: &str = "WITHSCORES"; +static WITH_SCORE: &str = "WITHSCORE"; static AGGREGATE: &str = "AGGREGATE"; static WEIGHTS: &str = "WEIGHTS"; -fn new_range_error(kind: &Option) -> Result<(), RedisError> { +fn new_range_error(kind: &Option) -> Result<(), Error> { if let Some(ref sort) = *kind { - Err(RedisError::new( - RedisErrorKind::InvalidArgument, + Err(Error::new( + ErrorKind::InvalidArgument, format!("Invalid range bound with {} sort", sort.to_str()), )) } else { - Err(RedisError::new( - RedisErrorKind::InvalidArgument, - "Invalid index range bound.", - )) + Err(Error::new(ErrorKind::InvalidArgument, "Invalid index range bound.")) } } -fn check_range_type(range: &ZRange, kind: &Option) -> Result<(), RedisError> { +fn check_range_type(range: &ZRange, kind: &Option) -> Result<(), Error> { match kind { Some(_kind) => match _kind { ZSort::ByLex => match range.range { @@ -45,7 +55,7 @@ fn check_range_type(range: &ZRange, kind: &Option) -> Result<(), RedisErr } } -fn check_range_types(min: &ZRange, max: &ZRange, kind: &Option) -> Result<(), RedisError> { +fn check_range_types(min: &ZRange, max: &ZRange, kind: &Option) -> Result<(), Error> { check_range_type(min, kind)?; check_range_type(max, kind)?; Ok(()) @@ -57,8 +67,8 @@ pub async fn bzmpop( keys: MultipleKeys, sort: ZCmp, count: Option, -) -> Result { - let timeout: RedisValue = timeout.try_into()?; +) -> Result 
{ + let timeout: Value = timeout.try_into()?; let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(keys.len() + 4); @@ -73,7 +83,7 @@ pub async fn bzmpop( args.push(count.into()); } - Ok((RedisCommandKind::BzmPop, args)) + Ok((CommandKind::BzmPop, args)) }) .await?; @@ -81,7 +91,7 @@ pub async fn bzmpop( protocol_utils::frame_to_results(frame) } -pub async fn bzpopmin(client: &C, keys: MultipleKeys, timeout: f64) -> Result { +pub async fn bzpopmin(client: &C, keys: MultipleKeys, timeout: f64) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1 + keys.len()); @@ -90,7 +100,7 @@ pub async fn bzpopmin(client: &C, keys: MultipleKeys, timeout: f6 } args.push(timeout.try_into()?); - Ok((RedisCommandKind::BzPopMin, args)) + Ok((CommandKind::BzPopMin, args)) }) .await?; @@ -98,7 +108,7 @@ pub async fn bzpopmin(client: &C, keys: MultipleKeys, timeout: f6 protocol_utils::frame_to_results(frame) } -pub async fn bzpopmax(client: &C, keys: MultipleKeys, timeout: f64) -> Result { +pub async fn bzpopmax(client: &C, keys: MultipleKeys, timeout: f64) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1 + keys.len()); @@ -107,7 +117,7 @@ pub async fn bzpopmax(client: &C, keys: MultipleKeys, timeout: f6 } args.push(timeout.try_into()?); - Ok((RedisCommandKind::BzPopMax, args)) + Ok((CommandKind::BzPopMax, args)) }) .await?; @@ -117,13 +127,13 @@ pub async fn bzpopmax(client: &C, keys: MultipleKeys, timeout: f6 pub async fn zadd( client: &C, - key: RedisKey, + key: Key, options: Option, ordering: Option, changed: bool, incr: bool, values: MultipleZaddValues, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(5 + (values.len() * 2)); args.push(key.into()); @@ -146,27 +156,23 @@ pub async fn zadd( args.push(value); } - Ok((RedisCommandKind::Zadd, args)) + Ok((CommandKind::Zadd, args)) }) 
.await?; protocol_utils::frame_to_results(frame) } -pub async fn zcard(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::Zcard, key.into()).await +pub async fn zcard(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::Zcard, key.into()).await } -pub async fn zcount(client: &C, key: RedisKey, min: f64, max: f64) -> Result { +pub async fn zcount(client: &C, key: Key, min: f64, max: f64) -> Result { let (min, max) = (min.try_into()?, max.try_into()?); - args_value_cmd(client, RedisCommandKind::Zcount, vec![key.into(), min, max]).await + args_value_cmd(client, CommandKind::Zcount, vec![key.into(), min, max]).await } -pub async fn zdiff( - client: &C, - keys: MultipleKeys, - withscores: bool, -) -> Result { +pub async fn zdiff(client: &C, keys: MultipleKeys, withscores: bool) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2 + keys.len()); args.push(keys.len().try_into()?); @@ -178,18 +184,14 @@ pub async fn zdiff( args.push(static_val!(WITH_SCORES)); } - Ok((RedisCommandKind::Zdiff, args)) + Ok((CommandKind::Zdiff, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn zdiffstore( - client: &C, - dest: RedisKey, - keys: MultipleKeys, -) -> Result { +pub async fn zdiffstore(client: &C, dest: Key, keys: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2 + keys.len()); args.push(dest.into()); @@ -198,22 +200,17 @@ pub async fn zdiffstore( for key in keys.inner().into_iter() { args.push(key.into()); } - Ok((RedisCommandKind::Zdiffstore, args)) + Ok((CommandKind::Zdiffstore, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn zincrby( - client: &C, - key: RedisKey, - increment: f64, - member: RedisValue, -) -> Result { +pub async fn zincrby(client: &C, key: Key, increment: f64, member: Value) -> Result { let increment = increment.try_into()?; let args = 
vec![key.into(), increment, member]; - args_value_cmd(client, RedisCommandKind::Zincrby, args).await + args_value_cmd(client, CommandKind::Zincrby, args).await } pub async fn zinter( @@ -222,7 +219,7 @@ pub async fn zinter( weights: MultipleWeights, aggregate: Option, withscores: bool, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let args_len = 6 + keys.len() + weights.len(); let mut args = Vec::with_capacity(args_len); @@ -245,7 +242,7 @@ pub async fn zinter( args.push(static_val!(WITH_SCORES)); } - Ok((RedisCommandKind::Zinter, args)) + Ok((CommandKind::Zinter, args)) }) .await?; @@ -254,11 +251,11 @@ pub async fn zinter( pub async fn zinterstore( client: &C, - dest: RedisKey, + dest: Key, keys: MultipleKeys, weights: MultipleWeights, aggregate: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let args_len = 5 + keys.len() + weights.len(); let mut args = Vec::with_capacity(args_len); @@ -279,51 +276,38 @@ pub async fn zinterstore( args.push(options.to_str().into()); } - Ok((RedisCommandKind::Zinterstore, args)) + Ok((CommandKind::Zinterstore, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn zlexcount( - client: &C, - key: RedisKey, - min: ZRange, - max: ZRange, -) -> Result { +pub async fn zlexcount(client: &C, key: Key, min: ZRange, max: ZRange) -> Result { check_range_types(&min, &max, &Some(ZSort::ByLex))?; let args = vec![key.into(), min.into_value()?, max.into_value()?]; - args_value_cmd(client, RedisCommandKind::Zlexcount, args).await + args_value_cmd(client, CommandKind::Zlexcount, args).await } -pub async fn zpopmax( - client: &C, - key: RedisKey, - count: Option, -) -> Result { +pub async fn zpopmax(client: &C, key: Key, count: Option) -> Result { let args = if let Some(count) = count { vec![key.into(), count.try_into()?] 
} else { vec![key.into()] }; - args_values_cmd(client, RedisCommandKind::Zpopmax, args).await + args_values_cmd(client, CommandKind::Zpopmax, args).await } -pub async fn zpopmin( - client: &C, - key: RedisKey, - count: Option, -) -> Result { +pub async fn zpopmin(client: &C, key: Key, count: Option) -> Result { let args = if let Some(count) = count { vec![key.into(), count.try_into()?] } else { vec![key.into()] }; - args_values_cmd(client, RedisCommandKind::Zpopmin, args).await + args_values_cmd(client, CommandKind::Zpopmin, args).await } pub async fn zmpop( @@ -331,7 +315,7 @@ pub async fn zmpop( keys: MultipleKeys, sort: ZCmp, count: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(keys.len() + 3); args.push(keys.len().try_into()?); @@ -344,18 +328,14 @@ pub async fn zmpop( args.push(count.into()); } - Ok((RedisCommandKind::Zmpop, args)) + Ok((CommandKind::Zmpop, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn zrandmember( - client: &C, - key: RedisKey, - count: Option<(i64, bool)>, -) -> Result { +pub async fn zrandmember(client: &C, key: Key, count: Option<(i64, bool)>) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(3); args.push(key.into()); @@ -367,7 +347,7 @@ pub async fn zrandmember( } } - Ok((RedisCommandKind::Zrandmember, args)) + Ok((CommandKind::Zrandmember, args)) }) .await?; @@ -376,14 +356,14 @@ pub async fn zrandmember( pub async fn zrangestore( client: &C, - dest: RedisKey, - source: RedisKey, + dest: Key, + source: Key, min: ZRange, max: ZRange, sort: Option, rev: bool, limit: Option, -) -> Result { +) -> Result { check_range_types(&min, &max, &sort)?; let frame = utils::request_response(client, move || { @@ -405,7 +385,7 @@ pub async fn zrangestore( args.push(count.into()); } - Ok((RedisCommandKind::Zrangestore, args)) + Ok((CommandKind::Zrangestore, args)) }) .await?; @@ -414,14 +394,14 @@ 
pub async fn zrangestore( pub async fn zrange( client: &C, - key: RedisKey, + key: Key, min: ZRange, max: ZRange, sort: Option, rev: bool, limit: Option, withscores: bool, -) -> Result { +) -> Result { check_range_types(&min, &max, &sort)?; let frame = utils::request_response(client, move || { @@ -445,7 +425,7 @@ pub async fn zrange( args.push(static_val!(WITH_SCORES)); } - Ok((RedisCommandKind::Zrange, args)) + Ok((CommandKind::Zrange, args)) }) .await?; @@ -454,11 +434,11 @@ pub async fn zrange( pub async fn zrangebylex( client: &C, - key: RedisKey, + key: Key, min: ZRange, max: ZRange, limit: Option, -) -> Result { +) -> Result { check_range_types(&min, &max, &Some(ZSort::ByLex))?; let frame = utils::request_response(client, move || { @@ -473,7 +453,7 @@ pub async fn zrangebylex( args.push(count.into()); } - Ok((RedisCommandKind::Zrangebylex, args)) + Ok((CommandKind::Zrangebylex, args)) }) .await?; @@ -482,11 +462,11 @@ pub async fn zrangebylex( pub async fn zrevrangebylex( client: &C, - key: RedisKey, + key: Key, max: ZRange, min: ZRange, limit: Option, -) -> Result { +) -> Result { check_range_types(&min, &max, &Some(ZSort::ByLex))?; let frame = utils::request_response(client, move || { @@ -501,7 +481,7 @@ pub async fn zrevrangebylex( args.push(count.into()); } - Ok((RedisCommandKind::Zrevrangebylex, args)) + Ok((CommandKind::Zrevrangebylex, args)) }) .await?; @@ -510,12 +490,12 @@ pub async fn zrevrangebylex( pub async fn zrangebyscore( client: &C, - key: RedisKey, + key: Key, min: ZRange, max: ZRange, withscores: bool, limit: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(7); args.push(key.into()); @@ -531,7 +511,7 @@ pub async fn zrangebyscore( args.push(count.into()); } - Ok((RedisCommandKind::Zrangebyscore, args)) + Ok((CommandKind::Zrangebyscore, args)) }) .await?; @@ -540,12 +520,12 @@ pub async fn zrangebyscore( pub async fn zrevrangebyscore( client: &C, - key: RedisKey, + 
key: Key, max: ZRange, min: ZRange, withscores: bool, limit: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(7); args.push(key.into()); @@ -561,22 +541,23 @@ pub async fn zrevrangebyscore( args.push(count.into()); } - Ok((RedisCommandKind::Zrevrangebyscore, args)) + Ok((CommandKind::Zrevrangebyscore, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn zrank(client: &C, key: RedisKey, member: RedisValue) -> Result { - args_value_cmd(client, RedisCommandKind::Zrank, vec![key.into(), member]).await +pub async fn zrank(client: &C, key: Key, member: Value, withscore: bool) -> Result { + let mut args = vec![key.into(), member]; + if withscore { + args.push(static_val!(WITH_SCORE)); + } + + args_value_cmd(client, CommandKind::Zrank, args).await } -pub async fn zrem( - client: &C, - key: RedisKey, - members: MultipleValues, -) -> Result { +pub async fn zrem(client: &C, key: Key, members: MultipleValues) -> Result { let frame = utils::request_response(client, move || { let members = members.into_multiple_values(); let mut args = Vec::with_capacity(1 + members.len()); @@ -585,23 +566,18 @@ pub async fn zrem( for member in members.into_iter() { args.push(member); } - Ok((RedisCommandKind::Zrem, args)) + Ok((CommandKind::Zrem, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn zremrangebylex( - client: &C, - key: RedisKey, - min: ZRange, - max: ZRange, -) -> Result { +pub async fn zremrangebylex(client: &C, key: Key, min: ZRange, max: ZRange) -> Result { let frame = utils::request_response(client, move || { check_range_types(&min, &max, &Some(ZSort::ByLex))?; - Ok((RedisCommandKind::Zremrangebylex, vec![ + Ok((CommandKind::Zremrangebylex, vec![ key.into(), min.into_value()?, max.into_value()?, @@ -612,26 +588,16 @@ pub async fn zremrangebylex( protocol_utils::frame_to_results(frame) } -pub async fn zremrangebyrank( - client: &C, - key: RedisKey, - start: 
i64, - stop: i64, -) -> Result { +pub async fn zremrangebyrank(client: &C, key: Key, start: i64, stop: i64) -> Result { let (start, stop) = (start.into(), stop.into()); - args_value_cmd(client, RedisCommandKind::Zremrangebyrank, vec![key.into(), start, stop]).await + args_value_cmd(client, CommandKind::Zremrangebyrank, vec![key.into(), start, stop]).await } -pub async fn zremrangebyscore( - client: &C, - key: RedisKey, - min: ZRange, - max: ZRange, -) -> Result { +pub async fn zremrangebyscore(client: &C, key: Key, min: ZRange, max: ZRange) -> Result { let frame = utils::request_response(client, move || { check_range_types(&min, &max, &Some(ZSort::ByScore))?; - Ok((RedisCommandKind::Zremrangebyscore, vec![ + Ok((CommandKind::Zremrangebyscore, vec![ key.into(), min.into_value()?, max.into_value()?, @@ -644,11 +610,11 @@ pub async fn zremrangebyscore( pub async fn zrevrange( client: &C, - key: RedisKey, + key: Key, start: i64, stop: i64, withscores: bool, -) -> Result { +) -> Result { let (start, stop) = (start.into(), stop.into()); let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(4); @@ -660,23 +626,24 @@ pub async fn zrevrange( args.push(static_val!(WITH_SCORES)); } - Ok((RedisCommandKind::Zrevrange, args)) + Ok((CommandKind::Zrevrange, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn zrevrank( - client: &C, - key: RedisKey, - member: RedisValue, -) -> Result { - args_value_cmd(client, RedisCommandKind::Zrevrank, vec![key.into(), member]).await +pub async fn zrevrank(client: &C, key: Key, member: Value, withscore: bool) -> Result { + let mut args = vec![key.into(), member]; + if withscore { + args.push(static_val!(WITH_SCORE)); + } + + args_value_cmd(client, CommandKind::Zrevrank, args).await } -pub async fn zscore(client: &C, key: RedisKey, member: RedisValue) -> Result { - args_value_cmd(client, RedisCommandKind::Zscore, vec![key.into(), member]).await +pub async fn zscore(client: &C, key: Key, 
member: Value) -> Result { + args_value_cmd(client, CommandKind::Zscore, vec![key.into(), member]).await } pub async fn zunion( @@ -685,7 +652,7 @@ pub async fn zunion( weights: MultipleWeights, aggregate: Option, withscores: bool, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let args_len = keys.len() + weights.len(); let mut args = Vec::with_capacity(5 + args_len); @@ -709,7 +676,7 @@ pub async fn zunion( args.push(static_val!(WITH_SCORES)); } - Ok((RedisCommandKind::Zunion, args)) + Ok((CommandKind::Zunion, args)) }) .await?; @@ -718,11 +685,11 @@ pub async fn zunion( pub async fn zunionstore( client: &C, - dest: RedisKey, + dest: Key, keys: MultipleKeys, weights: MultipleWeights, aggregate: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let args_len = keys.len() + weights.len(); let mut args = Vec::with_capacity(5 + args_len); @@ -744,18 +711,14 @@ pub async fn zunionstore( args.push(aggregate.to_str().into()); } - Ok((RedisCommandKind::Zunionstore, args)) + Ok((CommandKind::Zunionstore, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn zmscore( - client: &C, - key: RedisKey, - members: MultipleValues, -) -> Result { +pub async fn zmscore(client: &C, key: Key, members: MultipleValues) -> Result { let frame = utils::request_response(client, move || { let members = members.into_multiple_values(); let mut args = Vec::with_capacity(1 + members.len()); @@ -764,7 +727,7 @@ pub async fn zmscore( for member in members.into_iter() { args.push(member); } - Ok((RedisCommandKind::Zmscore, args)) + Ok((CommandKind::Zmscore, args)) }) .await?; diff --git a/src/commands/impls/streams.rs b/src/commands/impls/streams.rs index a1025759..eaa3c4f8 100644 --- a/src/commands/impls/streams.rs +++ b/src/commands/impls/streams.rs @@ -1,28 +1,24 @@ use super::*; use crate::{ - error::RedisError, + error::Error, protocol::{ - command::{RedisCommand, RedisCommandKind}, + 
command::{Command, CommandKind}, hashers::ClusterHash, utils as protocol_utils, }, types::{ - MultipleIDs, + streams::{MultipleIDs, MultipleOrderedPairs, XCap, XPendingArgs, XID}, + Key, MultipleKeys, - MultipleOrderedPairs, MultipleStrings, - RedisKey, - RedisValue, - XCap, - XPendingArgs, - XID, + Value, }, utils, }; use bytes_utils::Str; use std::convert::TryInto; -fn encode_cap(args: &mut Vec, cap: XCap) { +fn encode_cap(args: &mut Vec, cap: XCap) { if let Some((kind, trim, threshold, limit)) = cap.into_parts() { args.push(kind.to_str().into()); args.push(trim.to_str().into()); @@ -34,31 +30,27 @@ fn encode_cap(args: &mut Vec, cap: XCap) { } } -pub async fn xinfo_consumers( - client: &C, - key: RedisKey, - groupname: Str, -) -> Result { +pub async fn xinfo_consumers(client: &C, key: Key, groupname: Str) -> Result { let frame = utils::request_response(client, move || { let args = vec![key.into(), groupname.into()]; - Ok((RedisCommandKind::XinfoConsumers, args)) + Ok((CommandKind::XinfoConsumers, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn xinfo_groups(client: &C, key: RedisKey) -> Result { - let frame = utils::request_response(client, move || Ok((RedisCommandKind::XinfoGroups, vec![key.into()]))).await?; +pub async fn xinfo_groups(client: &C, key: Key) -> Result { + let frame = utils::request_response(client, move || Ok((CommandKind::XinfoGroups, vec![key.into()]))).await?; protocol_utils::frame_to_results(frame) } pub async fn xinfo_stream( client: &C, - key: RedisKey, + key: Key, full: bool, count: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(4); args.push(key.into()); @@ -71,7 +63,7 @@ pub async fn xinfo_stream( } } - Ok((RedisCommandKind::XinfoStream, args)) + Ok((CommandKind::XinfoStream, args)) }) .await?; @@ -80,12 +72,12 @@ pub async fn xinfo_stream( pub async fn xadd( client: &C, - key: RedisKey, + key: Key, nomkstream: bool, cap: XCap, id: 
XID, fields: MultipleOrderedPairs, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(8 + (fields.len() * 2)); args.push(key.into()); @@ -101,27 +93,27 @@ pub async fn xadd( args.push(value); } - Ok((RedisCommandKind::Xadd, args)) + Ok((CommandKind::Xadd, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn xtrim(client: &C, key: RedisKey, cap: XCap) -> Result { +pub async fn xtrim(client: &C, key: Key, cap: XCap) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(6); args.push(key.into()); encode_cap(&mut args, cap); - Ok((RedisCommandKind::Xtrim, args)) + Ok((CommandKind::Xtrim, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn xdel(client: &C, key: RedisKey, ids: MultipleStrings) -> Result { +pub async fn xdel(client: &C, key: Key, ids: MultipleStrings) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(1 + ids.len()); args.push(key.into()); @@ -129,7 +121,7 @@ pub async fn xdel(client: &C, key: RedisKey, ids: MultipleStrings for id in ids.inner().into_iter() { args.push(id.into()); } - Ok((RedisCommandKind::Xdel, args)) + Ok((CommandKind::Xdel, args)) }) .await?; @@ -138,11 +130,11 @@ pub async fn xdel(client: &C, key: RedisKey, ids: MultipleStrings pub async fn xrange( client: &C, - key: RedisKey, - start: RedisValue, - end: RedisValue, + key: Key, + start: Value, + end: Value, count: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(5); args.push(key.into()); @@ -154,7 +146,7 @@ pub async fn xrange( args.push(count.try_into()?); } - Ok((RedisCommandKind::Xrange, args)) + Ok((CommandKind::Xrange, args)) }) .await?; @@ -163,11 +155,11 @@ pub async fn xrange( pub async fn xrevrange( client: &C, - key: RedisKey, - end: RedisValue, - start: RedisValue, + key: Key, + end: Value, + 
start: Value, count: Option, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(5); args.push(key.into()); @@ -179,15 +171,15 @@ pub async fn xrevrange( args.push(count.try_into()?); } - Ok((RedisCommandKind::Xrevrange, args)) + Ok((CommandKind::Xrevrange, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn xlen(client: &C, key: RedisKey) -> Result { - one_arg_value_cmd(client, RedisCommandKind::Xlen, key.into()).await +pub async fn xlen(client: &C, key: Key) -> Result { + one_arg_value_cmd(client, CommandKind::Xlen, key.into()).await } pub async fn xread( @@ -196,7 +188,7 @@ pub async fn xread( block: Option, keys: MultipleKeys, ids: MultipleIDs, -) -> Result { +) -> Result { let is_clustered = client.inner().config.server.is_clustered(); let frame = utils::request_response(client, move || { let is_blocking = block.is_some(); @@ -226,7 +218,7 @@ pub async fn xread( args.push(id.into_str().into()); } - let mut command: RedisCommand = (RedisCommandKind::Xread, args).into(); + let mut command: Command = (CommandKind::Xread, args).into(); command.can_pipeline = !is_blocking; command.hasher = hash_slot.unwrap_or(ClusterHash::Random); Ok(command) @@ -238,11 +230,11 @@ pub async fn xread( pub async fn xgroup_create( client: &C, - key: RedisKey, + key: Key, groupname: Str, id: XID, mkstream: bool, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(4); args.push(key.into()); @@ -252,7 +244,7 @@ pub async fn xgroup_create( args.push(static_val!(MKSTREAM)); } - Ok((RedisCommandKind::Xgroupcreate, args)) + Ok((CommandKind::Xgroupcreate, args)) }) .await?; @@ -261,12 +253,12 @@ pub async fn xgroup_create( pub async fn xgroup_createconsumer( client: &C, - key: RedisKey, + key: Key, groupname: Str, consumername: Str, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { - 
Ok((RedisCommandKind::XgroupCreateConsumer, vec![ + Ok((CommandKind::XgroupCreateConsumer, vec![ key.into(), groupname.into(), consumername.into(), @@ -279,12 +271,12 @@ pub async fn xgroup_createconsumer( pub async fn xgroup_delconsumer( client: &C, - key: RedisKey, + key: Key, groupname: Str, consumername: Str, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::XgroupDelConsumer, vec![ + Ok((CommandKind::XgroupDelConsumer, vec![ key.into(), groupname.into(), consumername.into(), @@ -295,27 +287,18 @@ pub async fn xgroup_delconsumer( protocol_utils::frame_to_results(frame) } -pub async fn xgroup_destroy( - client: &C, - key: RedisKey, - groupname: Str, -) -> Result { +pub async fn xgroup_destroy(client: &C, key: Key, groupname: Str) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::XgroupDestroy, vec![key.into(), groupname.into()])) + Ok((CommandKind::XgroupDestroy, vec![key.into(), groupname.into()])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn xgroup_setid( - client: &C, - key: RedisKey, - groupname: Str, - id: XID, -) -> Result { +pub async fn xgroup_setid(client: &C, key: Key, groupname: Str, id: XID) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::XgroupSetId, vec![ + Ok((CommandKind::XgroupSetId, vec![ key.into(), groupname.into(), id.into_str().into(), @@ -335,7 +318,7 @@ pub async fn xreadgroup( noack: bool, keys: MultipleKeys, ids: MultipleIDs, -) -> Result { +) -> Result { let is_clustered = client.inner().config.server.is_clustered(); let frame = utils::request_response(client, move || { let is_blocking = block.is_some(); @@ -370,7 +353,7 @@ pub async fn xreadgroup( args.push(id.into_str().into()); } - let mut command: RedisCommand = (RedisCommandKind::Xreadgroup, args).into(); + let mut command: Command = (CommandKind::Xreadgroup, args).into(); command.can_pipeline = !is_blocking; 
command.hasher = hash_slot.unwrap_or(ClusterHash::Random); Ok(command) @@ -380,12 +363,7 @@ pub async fn xreadgroup( protocol_utils::frame_to_results(frame) } -pub async fn xack( - client: &C, - key: RedisKey, - group: Str, - ids: MultipleIDs, -) -> Result { +pub async fn xack(client: &C, key: Key, group: Str, ids: MultipleIDs) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2 + ids.len()); args.push(key.into()); @@ -394,7 +372,7 @@ pub async fn xack( for id in ids.inner().into_iter() { args.push(id.into_str().into()); } - Ok((RedisCommandKind::Xack, args)) + Ok((CommandKind::Xack, args)) }) .await?; @@ -403,7 +381,7 @@ pub async fn xack( pub async fn xclaim( client: &C, - key: RedisKey, + key: Key, group: Str, consumer: Str, min_idle_time: u64, @@ -413,7 +391,7 @@ pub async fn xclaim( retry_count: Option, force: bool, justid: bool, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(12 + ids.len()); args.push(key.into()); @@ -443,7 +421,7 @@ pub async fn xclaim( args.push(static_val!(JUSTID)); } - Ok((RedisCommandKind::Xclaim, args)) + Ok((CommandKind::Xclaim, args)) }) .await?; @@ -452,14 +430,14 @@ pub async fn xclaim( pub async fn xautoclaim( client: &C, - key: RedisKey, + key: Key, group: Str, consumer: Str, min_idle_time: u64, start: XID, count: Option, justid: bool, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(8); args.push(key.into()); @@ -476,7 +454,7 @@ pub async fn xautoclaim( args.push(static_val!(JUSTID)); } - Ok((RedisCommandKind::Xautoclaim, args)) + Ok((CommandKind::Xautoclaim, args)) }) .await?; @@ -485,10 +463,10 @@ pub async fn xautoclaim( pub async fn xpending( client: &C, - key: RedisKey, + key: Key, group: Str, cmd_args: XPendingArgs, -) -> Result { +) -> Result { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(8); 
args.push(key.into()); @@ -507,7 +485,7 @@ pub async fn xpending( } } - Ok((RedisCommandKind::Xpending, args)) + Ok((CommandKind::Xpending, args)) }) .await?; diff --git a/src/commands/impls/timeseries.rs b/src/commands/impls/timeseries.rs index 091a0aa4..40a90ded 100644 --- a/src/commands/impls/timeseries.rs +++ b/src/commands/impls/timeseries.rs @@ -1,19 +1,21 @@ use crate::{ - error::RedisError, - interfaces::{ClientLike, RedisResult}, - prelude::RedisKey, - protocol::{command::RedisCommandKind, utils as protocol_utils}, + error::Error, + interfaces::{ClientLike, FredResult}, + prelude::Key, + protocol::{command::CommandKind, utils as protocol_utils}, types::{ - Aggregator, - DuplicatePolicy, - Encoding, - GetLabels, - GetTimestamp, - GroupBy, - RangeAggregation, - RedisMap, - RedisValue, - Timestamp, + timeseries::{ + Aggregator, + DuplicatePolicy, + Encoding, + GetLabels, + GetTimestamp, + GroupBy, + RangeAggregation, + Timestamp, + }, + Map, + Value, }, utils, }; @@ -42,7 +44,7 @@ static UNCOMPRESSED: &str = "UNCOMPRESSED"; static TIMESTAMP: &str = "TIMESTAMP"; static DEBUG: &str = "DEBUG"; -fn add_labels(args: &mut Vec, labels: RedisMap) { +fn add_labels(args: &mut Vec, labels: Map) { if !labels.is_empty() { args.push(static_val!(LABELS)); @@ -53,7 +55,7 @@ fn add_labels(args: &mut Vec, labels: RedisMap) { } } -fn add_retention(args: &mut Vec, retention: Option) -> Result<(), RedisError> { +fn add_retention(args: &mut Vec, retention: Option) -> Result<(), Error> { if let Some(retention) = retention { args.push(static_val!(RETENTION)); args.push(retention.try_into()?); @@ -62,14 +64,14 @@ fn add_retention(args: &mut Vec, retention: Option) -> Result<( Ok(()) } -fn add_encoding(args: &mut Vec, encoding: Option) { +fn add_encoding(args: &mut Vec, encoding: Option) { if let Some(encoding) = encoding { args.push(static_val!(ENCODING)); args.push(encoding.to_str().into()); } } -fn add_chunk_size(args: &mut Vec, chunk_size: Option) -> Result<(), RedisError> { +fn 
add_chunk_size(args: &mut Vec, chunk_size: Option) -> Result<(), Error> { if let Some(chunk_size) = chunk_size { args.push(static_val!(CHUNK_SIZE)); args.push(chunk_size.try_into()?); @@ -78,21 +80,21 @@ fn add_chunk_size(args: &mut Vec, chunk_size: Option) -> Result Ok(()) } -fn add_timestamp(args: &mut Vec, timestamp: Option) { +fn add_timestamp(args: &mut Vec, timestamp: Option) { if let Some(timestamp) = timestamp { args.push(static_val!(TIMESTAMP)); args.push(timestamp.to_value()); } } -fn add_duplicate_policy(args: &mut Vec, duplicate_policy: Option) { +fn add_duplicate_policy(args: &mut Vec, duplicate_policy: Option) { if let Some(duplicate) = duplicate_policy { args.push(static_val!(DUPLICATE_POLICY)); args.push(duplicate.to_str().into()); } } -fn add_count(args: &mut Vec, count: Option) -> Result<(), RedisError> { +fn add_count(args: &mut Vec, count: Option) -> Result<(), Error> { if let Some(count) = count { args.push(static_val!(COUNT)); args.push(count.try_into()?); @@ -100,7 +102,7 @@ fn add_count(args: &mut Vec, count: Option) -> Result<(), Redis Ok(()) } -fn add_get_labels(args: &mut Vec, labels: Option) { +fn add_get_labels(args: &mut Vec, labels: Option) { if let Some(labels) = labels { match labels { GetLabels::WithLabels => args.push(static_val!(WITHLABELS)), @@ -112,10 +114,7 @@ fn add_get_labels(args: &mut Vec, labels: Option) { } } -fn add_range_aggregation( - args: &mut Vec, - aggregation: Option, -) -> Result<(), RedisError> { +fn add_range_aggregation(args: &mut Vec, aggregation: Option) -> Result<(), Error> { if let Some(aggregation) = aggregation { if let Some(align) = aggregation.align { args.push(static_val!(ALIGN)); @@ -138,7 +137,7 @@ fn add_range_aggregation( Ok(()) } -fn add_groupby(args: &mut Vec, group_by: Option) { +fn add_groupby(args: &mut Vec, group_by: Option) { if let Some(group_by) = group_by { args.push(static_val!(GROUPBY)); args.push(group_by.groupby.into()); @@ -147,22 +146,22 @@ fn add_groupby(args: &mut Vec, group_by: 
Option) { } } -fn add_filters(args: &mut Vec, filters: Vec) { +fn add_filters(args: &mut Vec, filters: Vec) { args.push(static_val!(FILTER)); args.extend(filters.into_iter().map(|s| s.into())); } pub async fn ts_add( client: &C, - key: RedisKey, + key: Key, timestamp: Timestamp, value: f64, retention: Option, encoding: Option, chunk_size: Option, on_duplicate: Option, - labels: RedisMap, -) -> RedisResult { + labels: Map, +) -> FredResult { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(12 + labels.len() * 2); args.push(key.into()); @@ -178,7 +177,7 @@ pub async fn ts_add( } add_labels(&mut args, labels); - Ok((RedisCommandKind::TsAdd, args)) + Ok((CommandKind::TsAdd, args)) }) .await?; @@ -187,12 +186,12 @@ pub async fn ts_add( pub async fn ts_alter( client: &C, - key: RedisKey, + key: Key, retention: Option, chunk_size: Option, duplicate_policy: Option, - labels: RedisMap, -) -> RedisResult { + labels: Map, +) -> FredResult { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(8 + labels.len() * 2); args.push(key.into()); @@ -201,7 +200,7 @@ pub async fn ts_alter( add_chunk_size(&mut args, chunk_size)?; add_duplicate_policy(&mut args, duplicate_policy); add_labels(&mut args, labels); - Ok((RedisCommandKind::TsAlter, args)) + Ok((CommandKind::TsAlter, args)) }) .await?; @@ -210,13 +209,13 @@ pub async fn ts_alter( pub async fn ts_create( client: &C, - key: RedisKey, + key: Key, retention: Option, encoding: Option, chunk_size: Option, duplicate_policy: Option, - labels: RedisMap, -) -> RedisResult { + labels: Map, +) -> FredResult { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(10 + labels.len() * 2); args.push(key.into()); @@ -226,7 +225,7 @@ pub async fn ts_create( add_chunk_size(&mut args, chunk_size)?; add_duplicate_policy(&mut args, duplicate_policy); add_labels(&mut args, labels); - Ok((RedisCommandKind::TsCreate, args)) + 
Ok((CommandKind::TsCreate, args)) }) .await?; @@ -235,11 +234,11 @@ pub async fn ts_create( pub async fn ts_createrule( client: &C, - src: RedisKey, - dest: RedisKey, + src: Key, + dest: Key, aggregation: (Aggregator, u64), align_timestamp: Option, -) -> RedisResult { +) -> FredResult { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(6); args.extend([ @@ -253,7 +252,7 @@ pub async fn ts_createrule( if let Some(align) = align_timestamp { args.push(align.try_into()?) } - Ok((RedisCommandKind::TsCreateRule, args)) + Ok((CommandKind::TsCreateRule, args)) }) .await?; @@ -262,14 +261,14 @@ pub async fn ts_createrule( pub async fn ts_decrby( client: &C, - key: RedisKey, + key: Key, subtrahend: f64, timestamp: Option, retention: Option, uncompressed: bool, chunk_size: Option, - labels: RedisMap, -) -> RedisResult { + labels: Map, +) -> FredResult { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(10 + labels.len() * 2); args.push(key.into()); @@ -283,32 +282,32 @@ pub async fn ts_decrby( add_chunk_size(&mut args, chunk_size)?; add_labels(&mut args, labels); - Ok((RedisCommandKind::TsDecrBy, args)) + Ok((CommandKind::TsDecrBy, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn ts_del(client: &C, key: RedisKey, from: i64, to: i64) -> RedisResult { +pub async fn ts_del(client: &C, key: Key, from: i64, to: i64) -> FredResult { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::TsDel, vec![key.into(), from.into(), to.into()])) + Ok((CommandKind::TsDel, vec![key.into(), from.into(), to.into()])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn ts_deleterule(client: &C, src: RedisKey, dest: RedisKey) -> RedisResult { +pub async fn ts_deleterule(client: &C, src: Key, dest: Key) -> FredResult { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::TsDeleteRule, vec![src.into(), dest.into()])) + 
Ok((CommandKind::TsDeleteRule, vec![src.into(), dest.into()])) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn ts_get(client: &C, key: RedisKey, latest: bool) -> RedisResult { +pub async fn ts_get(client: &C, key: Key, latest: bool) -> FredResult { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2); args.push(key.into()); @@ -316,7 +315,7 @@ pub async fn ts_get(client: &C, key: RedisKey, latest: bool) -> R args.push(static_val!(LATEST)); } - Ok((RedisCommandKind::TsGet, args)) + Ok((CommandKind::TsGet, args)) }) .await?; @@ -325,14 +324,14 @@ pub async fn ts_get(client: &C, key: RedisKey, latest: bool) -> R pub async fn ts_incrby( client: &C, - key: RedisKey, + key: Key, addend: f64, timestamp: Option, retention: Option, uncompressed: bool, chunk_size: Option, - labels: RedisMap, -) -> RedisResult { + labels: Map, +) -> FredResult { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(10 + labels.len() * 2); args.push(key.into()); @@ -346,14 +345,14 @@ pub async fn ts_incrby( add_chunk_size(&mut args, chunk_size)?; add_labels(&mut args, labels); - Ok((RedisCommandKind::TsIncrBy, args)) + Ok((CommandKind::TsIncrBy, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn ts_info(client: &C, key: RedisKey, debug: bool) -> RedisResult { +pub async fn ts_info(client: &C, key: Key, debug: bool) -> FredResult { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(2); args.push(key.into()); @@ -361,20 +360,20 @@ pub async fn ts_info(client: &C, key: RedisKey, debug: bool) -> R args.push(static_val!(DEBUG)); } - Ok((RedisCommandKind::TsInfo, args)) + Ok((CommandKind::TsInfo, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn ts_madd(client: &C, samples: Vec<(RedisKey, Timestamp, f64)>) -> RedisResult { +pub async fn ts_madd(client: &C, samples: Vec<(Key, Timestamp, f64)>) -> FredResult { let frame = 
utils::request_response(client, move || { let mut args = Vec::with_capacity(samples.len() * 3); for (key, timestamp, value) in samples.into_iter() { args.extend([key.into(), timestamp.to_value(), value.into()]); } - Ok((RedisCommandKind::TsMAdd, args)) + Ok((CommandKind::TsMAdd, args)) }) .await?; @@ -386,7 +385,7 @@ pub async fn ts_mget( latest: bool, labels: Option, filters: Vec, -) -> RedisResult { +) -> FredResult { let frame = utils::request_response(client, move || { let labels_len = labels.as_ref().map(|l| l.args_len()).unwrap_or(0); let mut args = Vec::with_capacity(2 + labels_len + filters.len()); @@ -396,7 +395,7 @@ pub async fn ts_mget( add_get_labels(&mut args, labels); add_filters(&mut args, filters); - Ok((RedisCommandKind::TsMGet, args)) + Ok((CommandKind::TsMGet, args)) }) .await?; @@ -415,7 +414,7 @@ pub async fn ts_mrange( aggregation: Option, filters: Vec, group_by: Option, -) -> RedisResult { +) -> FredResult { let frame = utils::request_response(client, move || { let labels_len = labels.as_ref().map(|l| l.args_len()).unwrap_or(0); let mut args = Vec::with_capacity(18 + filter_by_ts.len() + labels_len + filters.len()); @@ -438,7 +437,7 @@ pub async fn ts_mrange( add_filters(&mut args, filters); add_groupby(&mut args, group_by); - Ok((RedisCommandKind::TsMRange, args)) + Ok((CommandKind::TsMRange, args)) }) .await?; @@ -457,7 +456,7 @@ pub async fn ts_mrevrange( aggregation: Option, filters: Vec, group_by: Option, -) -> RedisResult { +) -> FredResult { let frame = utils::request_response(client, move || { let labels_len = labels.as_ref().map(|l| l.args_len()).unwrap_or(0); let mut args = Vec::with_capacity(18 + filter_by_ts.len() + labels_len + filters.len()); @@ -480,17 +479,17 @@ pub async fn ts_mrevrange( add_filters(&mut args, filters); add_groupby(&mut args, group_by); - Ok((RedisCommandKind::TsMRevRange, args)) + Ok((CommandKind::TsMRevRange, args)) }) .await?; protocol_utils::frame_to_results(frame) } -pub async fn ts_queryindex(client: 
&C, filters: Vec) -> RedisResult { +pub async fn ts_queryindex(client: &C, filters: Vec) -> FredResult { let frame = utils::request_response(client, move || { Ok(( - RedisCommandKind::TsQueryIndex, + CommandKind::TsQueryIndex, filters.into_iter().map(|v| v.into()).collect(), )) }) @@ -501,7 +500,7 @@ pub async fn ts_queryindex(client: &C, filters: Vec) -> Redi pub async fn ts_range( client: &C, - key: RedisKey, + key: Key, from: GetTimestamp, to: GetTimestamp, latest: bool, @@ -509,7 +508,7 @@ pub async fn ts_range( filter_by_value: Option<(i64, i64)>, count: Option, aggregation: Option, -) -> RedisResult { +) -> FredResult { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(14 + filter_by_ts.len()); args.push(key.into()); @@ -529,7 +528,7 @@ pub async fn ts_range( add_count(&mut args, count)?; add_range_aggregation(&mut args, aggregation)?; - Ok((RedisCommandKind::TsRange, args)) + Ok((CommandKind::TsRange, args)) }) .await?; @@ -538,7 +537,7 @@ pub async fn ts_range( pub async fn ts_revrange( client: &C, - key: RedisKey, + key: Key, from: GetTimestamp, to: GetTimestamp, latest: bool, @@ -546,7 +545,7 @@ pub async fn ts_revrange( filter_by_value: Option<(i64, i64)>, count: Option, aggregation: Option, -) -> RedisResult { +) -> FredResult { let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(14 + filter_by_ts.len()); args.push(key.into()); @@ -566,7 +565,7 @@ pub async fn ts_revrange( add_count(&mut args, count)?; add_range_aggregation(&mut args, aggregation)?; - Ok((RedisCommandKind::TsRevRange, args)) + Ok((CommandKind::TsRevRange, args)) }) .await?; diff --git a/src/commands/impls/tracking.rs b/src/commands/impls/tracking.rs index 9b088ddf..c7706fed 100644 --- a/src/commands/impls/tracking.rs +++ b/src/commands/impls/tracking.rs @@ -1,13 +1,13 @@ use crate::{ - error::{RedisError, RedisErrorKind}, + error::{Error, ErrorKind}, interfaces::ClientLike, protocol::{ - command::{RedisCommand, 
RedisCommandKind}, + command::{Command, CommandKind}, responders::ResponseKind, utils as protocol_utils, }, runtime::oneshot_channel, - types::{ClusterHash, MultipleStrings, RedisValue, Toggle}, + types::{client::Toggle, ClusterHash, MultipleStrings, Value}, utils, }; use redis_protocol::redis_keyslot; @@ -29,7 +29,7 @@ fn tracking_args( optin: bool, optout: bool, noloop: bool, -) -> Vec { +) -> Vec { let mut args = Vec::with_capacity(prefixes.len() * 2 + 7); args.push(static_val!(toggle.to_str())); if let Some(redirect) = redirect { @@ -63,12 +63,9 @@ pub async fn start_tracking( optin: bool, optout: bool, noloop: bool, -) -> Result<(), RedisError> { +) -> Result<(), Error> { if !client.inner().is_resp3() { - return Err(RedisError::new( - RedisErrorKind::Config, - "Client tracking requires RESP3.", - )); + return Err(Error::new(ErrorKind::Config, "Client tracking requires RESP3.")); } let args = tracking_args(Toggle::On, None, prefixes, bcast, optin, optout, noloop); @@ -76,7 +73,7 @@ pub async fn start_tracking( if bcast { // only send the tracking command on one connection when in bcast mode let frame = utils::request_response(client, move || { - let mut command = RedisCommand::new(RedisCommandKind::ClientTracking, args); + let mut command = Command::new(CommandKind::ClientTracking, args); command.hasher = ClusterHash::Custom(redis_keyslot(client.id().as_bytes())); Ok(command) }) @@ -86,8 +83,8 @@ pub async fn start_tracking( } else { // send the tracking command to all nodes when not in bcast mode let (tx, rx) = oneshot_channel(); - let response = ResponseKind::new_buffer(tx); - let command: RedisCommand = (RedisCommandKind::_ClientTrackingCluster, args, response).into(); + let response = ResponseKind::Respond(Some(tx)); + let command: Command = (CommandKind::_ClientTrackingCluster, args, response).into(); client.send_command(command)?; let frame = utils::timeout(rx, client.inner().internal_command_timeout()).await??; @@ -95,34 +92,31 @@ pub async fn 
start_tracking( Ok(()) } } else { - utils::request_response(client, move || Ok((RedisCommandKind::ClientTracking, args))) + utils::request_response(client, move || Ok((CommandKind::ClientTracking, args))) .await .and_then(protocol_utils::frame_to_results) .and_then(|v| v.convert()) } } -pub async fn stop_tracking(client: &C) -> Result<(), RedisError> { +pub async fn stop_tracking(client: &C) -> Result<(), Error> { if !client.inner().is_resp3() { - return Err(RedisError::new( - RedisErrorKind::Config, - "Client tracking requires RESP3.", - )); + return Err(Error::new(ErrorKind::Config, "Client tracking requires RESP3.")); } let args = vec![static_val!(Toggle::Off.to_str())]; if client.is_clustered() { // turn off tracking on all connections let (tx, rx) = oneshot_channel(); - let response = ResponseKind::new_buffer(tx); - let command: RedisCommand = (RedisCommandKind::_ClientTrackingCluster, args, response).into(); + let response = ResponseKind::Respond(Some(tx)); + let command: Command = (CommandKind::_ClientTrackingCluster, args, response).into(); client.send_command(command)?; let frame = utils::timeout(rx, client.inner().internal_command_timeout()).await??; let _ = protocol_utils::frame_to_results(frame)?; Ok(()) } else { - utils::request_response(client, move || Ok((RedisCommandKind::ClientTracking, args))) + utils::request_response(client, move || Ok((CommandKind::ClientTracking, args))) .await .and_then(protocol_utils::frame_to_results) .and_then(|v| v.convert()) @@ -138,34 +132,34 @@ pub async fn client_tracking( optin: bool, optout: bool, noloop: bool, -) -> Result { +) -> Result { let args = tracking_args(toggle, redirect, prefixes, bcast, optin, optout, noloop); - utils::request_response(client, move || Ok((RedisCommandKind::ClientTracking, args))) + utils::request_response(client, move || Ok((CommandKind::ClientTracking, args))) .await .and_then(protocol_utils::frame_to_results) } -pub async fn client_trackinginfo(client: &C) -> Result { - 
utils::request_response(client, move || Ok((RedisCommandKind::ClientTrackingInfo, vec![]))) +pub async fn client_trackinginfo(client: &C) -> Result { + utils::request_response(client, move || Ok((CommandKind::ClientTrackingInfo, vec![]))) .await .and_then(protocol_utils::frame_to_results) } -pub async fn client_getredir(client: &C) -> Result { - utils::request_response(client, move || Ok((RedisCommandKind::ClientGetRedir, vec![]))) +pub async fn client_getredir(client: &C) -> Result { + utils::request_response(client, move || Ok((CommandKind::ClientGetRedir, vec![]))) .await .and_then(protocol_utils::frame_to_results) } -pub async fn client_caching(client: &C, enabled: bool) -> Result { +pub async fn client_caching(client: &C, enabled: bool) -> Result { let args = if enabled { vec![static_val!(YES)] } else { vec![static_val!(NO)] }; - utils::request_response(client, move || Ok((RedisCommandKind::ClientCaching, args))) + utils::request_response(client, move || Ok((CommandKind::ClientCaching, args))) .await .and_then(protocol_utils::frame_to_results) } diff --git a/src/commands/interfaces/acl.rs b/src/commands/interfaces/acl.rs index ff58b845..e06e8c7b 100644 --- a/src/commands/interfaces/acl.rs +++ b/src/commands/interfaces/acl.rs @@ -1,8 +1,8 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, - types::{FromRedis, MultipleStrings, MultipleValues, RedisValue}, + error::Error, + interfaces::{ClientLike, FredResult}, + types::{FromValue, MultipleStrings, MultipleValues, Value}, }; use bytes_utils::Str; use fred_macros::rm_send_if; @@ -14,11 +14,11 @@ pub trait AclInterface: ClientLike + Sized { /// Create an ACL user with the specified rules or modify the rules of an existing user. 
/// /// - fn acl_setuser(&self, username: S, rules: V) -> impl Future> + Send + fn acl_setuser(&self, username: S, rules: V) -> impl Future> + Send where S: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(username); @@ -31,7 +31,7 @@ pub trait AclInterface: ClientLike + Sized { /// the ACLs from the file, replacing all the current ACL rules with the ones defined in the file. /// /// - fn acl_load(&self) -> impl Future> + Send { + fn acl_load(&self) -> impl Future> + Send { async move { commands::acl::acl_load(self).await } } @@ -39,16 +39,16 @@ pub trait AclInterface: ClientLike + Sized { /// currently defined ACLs from the server memory to the ACL file. /// /// - fn acl_save(&self) -> impl Future> + Send { + fn acl_save(&self) -> impl Future> + Send { async move { commands::acl::acl_save(self).await } } /// The command shows the currently active ACL rules in the Redis server. /// /// \ - fn acl_list(&self) -> impl Future> + Send + fn acl_list(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::acl::acl_list(self).await?.convert() } } @@ -56,9 +56,9 @@ pub trait AclInterface: ClientLike + Sized { /// The command shows a list of all the usernames of the currently configured users in the Redis ACL system. /// /// - fn acl_users(&self) -> impl Future> + Send + fn acl_users(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::acl::acl_users(self).await?.convert() } } @@ -66,11 +66,11 @@ pub trait AclInterface: ClientLike + Sized { /// The command returns all the rules defined for an existing ACL user. 
/// /// - fn acl_getuser(&self, username: U) -> impl Future> + Send + fn acl_getuser(&self, username: U) -> impl Future> + Send where - R: FromRedis, - U: TryInto + Send, - U::Error: Into + Send, + R: FromValue, + U: TryInto + Send, + U::Error: Into + Send, { async move { try_into!(username); @@ -81,9 +81,9 @@ pub trait AclInterface: ClientLike + Sized { /// Delete all the specified ACL users and terminate all the connections that are authenticated with such users. /// /// - fn acl_deluser(&self, usernames: S) -> impl Future> + Send + fn acl_deluser(&self, usernames: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -96,9 +96,9 @@ pub trait AclInterface: ClientLike + Sized { /// the command shows all the Redis commands in the specified category. /// /// - fn acl_cat(&self, category: Option) -> impl Future> + Send + fn acl_cat(&self, category: Option) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::acl::acl_cat(self, category).await?.convert() } } @@ -106,9 +106,9 @@ pub trait AclInterface: ClientLike + Sized { /// Generate a password with length `bits`, returning the password. /// /// - fn acl_genpass(&self, bits: Option) -> impl Future> + Send + fn acl_genpass(&self, bits: Option) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::acl::acl_genpass(self, bits).await?.convert() } } @@ -117,9 +117,9 @@ pub trait AclInterface: ClientLike + Sized { /// with the "default" user. /// /// - fn acl_whoami(&self) -> impl Future> + Send + fn acl_whoami(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::acl::acl_whoami(self).await?.convert() } } @@ -127,9 +127,9 @@ pub trait AclInterface: ClientLike + Sized { /// Read `count` recent ACL security events. 
/// /// - fn acl_log_count(&self, count: Option) -> impl Future> + Send + fn acl_log_count(&self, count: Option) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::acl::acl_log_count(self, count).await?.convert() } } @@ -137,7 +137,7 @@ pub trait AclInterface: ClientLike + Sized { /// Clear the ACL security events logs. /// /// - fn acl_log_reset(&self) -> impl Future> + Send { + fn acl_log_reset(&self) -> impl Future> + Send { async move { commands::acl::acl_log_reset(self).await } } } diff --git a/src/commands/interfaces/client.rs b/src/commands/interfaces/client.rs index ef0dc19c..540e951f 100644 --- a/src/commands/interfaces/client.rs +++ b/src/commands/interfaces/client.rs @@ -1,18 +1,15 @@ #[cfg(feature = "i-tracking")] -use crate::types::{MultipleStrings, Toggle}; +use crate::types::{client::Toggle, MultipleStrings}; use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, + error::Error, + interfaces::{ClientLike, FredResult}, types::{ - ClientKillFilter, - ClientKillType, - ClientPauseKind, - ClientReplyFlag, + client::{ClientKillFilter, ClientKillType, ClientPauseKind, ClientReplyFlag}, + config::Server, ClientUnblockFlag, - FromRedis, - RedisValue, - Server, + FromValue, + Value, }, }; use bytes_utils::Str; @@ -29,9 +26,9 @@ pub trait ClientInterface: ClientLike + Sized { /// [connection_ids](Self::connection_ids) for more information. /// /// - fn client_id(&self) -> impl Future> + Send + fn client_id(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::client::client_id(self).await?.convert() } } @@ -41,17 +38,17 @@ pub trait ClientInterface: ClientLike + Sized { /// The returned map contains each server's `host:port` and the result of calling `CLIENT ID` on the connection. /// /// Note: despite being async this function will return cached information from the client if possible. 
- fn connection_ids(&self) -> impl Future> + Send { - async move { self.inner().backchannel.write().await.connection_ids.clone() } + fn connection_ids(&self) -> HashMap { + self.inner().backchannel.connection_ids.lock().clone() } /// The command returns information and statistics about the current client connection in a mostly human readable /// format. /// /// - fn client_info(&self) -> impl Future> + Send + fn client_info(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::client::client_info(self).await?.convert() } } @@ -59,9 +56,9 @@ pub trait ClientInterface: ClientLike + Sized { /// Close a given connection or set of connections. /// /// - fn client_kill(&self, filters: Vec) -> impl Future> + Send + fn client_kill(&self, filters: Vec) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::client::client_kill(self, filters).await?.convert() } } @@ -74,9 +71,9 @@ pub trait ClientInterface: ClientLike + Sized { &self, r#type: Option, ids: Option>, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::client::client_list(self, r#type, ids).await?.convert() } } @@ -84,9 +81,9 @@ pub trait ClientInterface: ClientLike + Sized { /// The CLIENT GETNAME returns the name of the current connection as set by CLIENT SETNAME. /// /// - fn client_getname(&self) -> impl Future> + Send + fn client_getname(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::client::client_getname(self).await?.convert() } } @@ -97,7 +94,7 @@ pub trait ClientInterface: ClientLike + Sized { /// connections. Use `self.id() to read the automatically generated name.** /// /// - fn client_setname(&self, name: S) -> impl Future> + Send + fn client_setname(&self, name: S) -> impl Future> + Send where S: Into + Send, { @@ -111,18 +108,14 @@ pub trait ClientInterface: ClientLike + Sized { /// time (in milliseconds). 
/// /// - fn client_pause( - &self, - timeout: i64, - mode: Option, - ) -> impl Future> + Send { + fn client_pause(&self, timeout: i64, mode: Option) -> impl Future> + Send { async move { commands::client::client_pause(self, timeout, mode).await } } /// CLIENT UNPAUSE is used to resume command processing for all clients that were paused by CLIENT PAUSE. /// /// - fn client_unpause(&self) -> impl Future> + Send { + fn client_unpause(&self) -> impl Future> + Send { async move { commands::client::client_unpause(self).await } } @@ -130,7 +123,7 @@ pub trait ClientInterface: ClientLike + Sized { /// available: /// /// - fn client_reply(&self, flag: ClientReplyFlag) -> impl Future> + Send { + fn client_reply(&self, flag: ClientReplyFlag) -> impl Future> + Send { async move { commands::client::client_reply(self, flag).await } } @@ -140,14 +133,10 @@ pub trait ClientInterface: ClientLike + Sized { /// Note: this command is sent on a backchannel connection and will work even when the main connection is blocked. /// /// - fn client_unblock( - &self, - id: S, - flag: Option, - ) -> impl Future> + Send + fn client_unblock(&self, id: S, flag: Option) -> impl Future> + Send where - R: FromRedis, - S: Into + Send, + R: FromValue, + S: Into + Send, { async move { into!(id); @@ -156,18 +145,18 @@ pub trait ClientInterface: ClientLike + Sized { } /// A convenience function to unblock any blocked connection on this client. - fn unblock_self(&self, flag: Option) -> impl Future> + Send { + fn unblock_self(&self, flag: Option) -> impl Future> + Send { async move { commands::client::unblock_self(self, flag).await } } /// Returns message. 
/// /// - fn echo(&self, message: M) -> impl Future> + Send + fn echo(&self, message: M) -> impl Future> + Send where - R: FromRedis, - M: TryInto + Send, - M::Error: Into + Send, + R: FromValue, + M: TryInto + Send, + M::Error: Into + Send, { async move { try_into!(message); @@ -195,11 +184,11 @@ pub trait ClientInterface: ClientLike + Sized { optin: bool, optout: bool, noloop: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, T: TryInto + Send, - T::Error: Into + Send, + T::Error: Into + Send, P: Into + Send, { async move { @@ -215,12 +204,11 @@ pub trait ClientInterface: ClientLike + Sized { /// caching feature. /// /// - #[cfg(feature = "i-tracking")] #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] - fn client_trackinginfo(&self) -> impl Future> + Send + fn client_trackinginfo(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::tracking::client_trackinginfo(self).await?.convert() } } @@ -230,9 +218,9 @@ pub trait ClientInterface: ClientLike + Sized { /// #[cfg(feature = "i-tracking")] #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] - fn client_getredir(&self) -> impl Future> + Send + fn client_getredir(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::tracking::client_getredir(self).await?.convert() } } @@ -247,9 +235,9 @@ pub trait ClientInterface: ClientLike + Sized { /// types. 
#[cfg(feature = "i-tracking")] #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] - fn client_caching(&self, enabled: bool) -> impl Future> + Send + fn client_caching(&self, enabled: bool) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::tracking::client_caching(self, enabled).await?.convert() } } diff --git a/src/commands/interfaces/cluster.rs b/src/commands/interfaces/cluster.rs index 115d9626..ee0d390a 100644 --- a/src/commands/interfaces/cluster.rs +++ b/src/commands/interfaces/cluster.rs @@ -1,9 +1,14 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, + error::Error, + interfaces::{ClientLike, FredResult}, protocol::types::ClusterRouting, - types::{ClusterFailoverFlag, ClusterResetFlag, ClusterSetSlotState, FromRedis, MultipleHashSlots, RedisKey}, + types::{ + cluster::{ClusterFailoverFlag, ClusterResetFlag, ClusterSetSlotState}, + FromValue, + Key, + MultipleHashSlots, + }, }; use bytes_utils::Str; use fred_macros::rm_send_if; @@ -26,16 +31,16 @@ pub trait ClusterInterface: ClientLike + Sized { } /// Update the cached cluster state and add or remove any changed cluster node connections. - fn sync_cluster(&self) -> impl Future> + Send { + fn sync_cluster(&self) -> impl Future> + Send { async move { commands::cluster::sync_cluster(self).await } } /// Advances the cluster config epoch. /// /// - fn cluster_bumpepoch(&self) -> impl Future> + Send + fn cluster_bumpepoch(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::cluster::cluster_bumpepoch(self).await?.convert() } } @@ -43,16 +48,16 @@ pub trait ClusterInterface: ClientLike + Sized { /// Deletes all slots from a node. /// /// - fn cluster_flushslots(&self) -> impl Future> + Send { + fn cluster_flushslots(&self) -> impl Future> + Send { async move { commands::cluster::cluster_flushslots(self).await } } /// Returns the node's id. 
/// /// - fn cluster_myid(&self) -> impl Future> + Send + fn cluster_myid(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::cluster::cluster_myid(self).await?.convert() } } @@ -63,9 +68,9 @@ pub trait ClusterInterface: ClientLike + Sized { /// [cached_cluster_state](Self::cached_cluster_state). /// /// - fn cluster_nodes(&self) -> impl Future> + Send + fn cluster_nodes(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::cluster::cluster_nodes(self).await?.convert() } } @@ -73,16 +78,16 @@ pub trait ClusterInterface: ClientLike + Sized { /// Forces a node to save the nodes.conf configuration on disk. /// /// - fn cluster_saveconfig(&self) -> impl Future> + Send { + fn cluster_saveconfig(&self) -> impl Future> + Send { async move { commands::cluster::cluster_saveconfig(self).await } } /// CLUSTER SLOTS returns details about which cluster slots map to which Redis instances. /// /// - fn cluster_slots(&self) -> impl Future> + Send + fn cluster_slots(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::cluster::cluster_slots(self).await?.convert() } } @@ -90,9 +95,9 @@ pub trait ClusterInterface: ClientLike + Sized { /// CLUSTER INFO provides INFO style information about Redis Cluster vital parameters. /// /// - fn cluster_info(&self) -> impl Future> + Send + fn cluster_info(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::cluster::cluster_info(self).await?.convert() } } @@ -101,7 +106,7 @@ pub trait ClusterInterface: ClientLike + Sized { /// set of hash slots to the node receiving the command. /// /// - fn cluster_add_slots(&self, slots: S) -> impl Future> + Send + fn cluster_add_slots(&self, slots: S) -> impl Future> + Send where S: Into + Send, { @@ -114,9 +119,9 @@ pub trait ClusterInterface: ClientLike + Sized { /// The command returns the number of failure reports for the specified node. 
/// /// - fn cluster_count_failure_reports(&self, node_id: S) -> impl Future> + Send + fn cluster_count_failure_reports(&self, node_id: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -130,9 +135,9 @@ pub trait ClusterInterface: ClientLike + Sized { /// Returns the number of keys in the specified Redis Cluster hash slot. /// /// - fn cluster_count_keys_in_slot(&self, slot: u16) -> impl Future> + Send + fn cluster_count_keys_in_slot(&self, slot: u16) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::cluster::cluster_count_keys_in_slot(self, slot) @@ -145,7 +150,7 @@ pub trait ClusterInterface: ClientLike + Sized { /// slots specified as arguments. /// /// - fn cluster_del_slots(&self, slots: S) -> impl Future> + Send + fn cluster_del_slots(&self, slots: S) -> impl Future> + Send where S: Into + Send, { @@ -159,7 +164,7 @@ pub trait ClusterInterface: ClientLike + Sized { /// failover of its master instance. /// /// - fn cluster_failover(&self, flag: Option) -> impl Future> + Send { + fn cluster_failover(&self, flag: Option) -> impl Future> + Send { async move { commands::cluster::cluster_failover(self, flag).await } } @@ -168,7 +173,7 @@ pub trait ClusterInterface: ClientLike + Sized { /// the node receiving the command. /// /// - fn cluster_forget(&self, node_id: S) -> impl Future> + Send + fn cluster_forget(&self, node_id: S) -> impl Future> + Send where S: Into + Send, { @@ -181,9 +186,9 @@ pub trait ClusterInterface: ClientLike + Sized { /// The command returns an array of keys names stored in the contacted node and hashing to the specified hash slot. 
/// /// - fn cluster_get_keys_in_slot(&self, slot: u16, count: u64) -> impl Future> + Send + fn cluster_get_keys_in_slot(&self, slot: u16, count: u64) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::cluster::cluster_get_keys_in_slot(self, slot, count) @@ -195,10 +200,10 @@ pub trait ClusterInterface: ClientLike + Sized { /// Returns an integer identifying the hash slot the specified key hashes to. /// /// - fn cluster_keyslot(&self, key: K) -> impl Future> + Send + fn cluster_keyslot(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -210,7 +215,7 @@ pub trait ClusterInterface: ClientLike + Sized { /// cluster. /// /// - fn cluster_meet(&self, ip: S, port: u16) -> impl Future> + Send + fn cluster_meet(&self, ip: S, port: u16) -> impl Future> + Send where S: Into + Send, { @@ -224,7 +229,7 @@ pub trait ClusterInterface: ClientLike + Sized { /// empty master, as a side effect of the command, the node role is changed from master to replica. /// /// - fn cluster_replicate(&self, node_id: S) -> impl Future> + Send + fn cluster_replicate(&self, node_id: S) -> impl Future> + Send where S: Into + Send, { @@ -237,9 +242,9 @@ pub trait ClusterInterface: ClientLike + Sized { /// The command provides a list of replica nodes replicating from the specified master node. /// /// - fn cluster_replicas(&self, node_id: S) -> impl Future> + Send + fn cluster_replicas(&self, node_id: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -253,21 +258,21 @@ pub trait ClusterInterface: ClientLike + Sized { /// reset a master node keys must be removed first, e.g. by using FLUSHALL first, and then CLUSTER RESET. 
/// /// - fn cluster_reset(&self, mode: Option) -> impl Future> + Send { + fn cluster_reset(&self, mode: Option) -> impl Future> + Send { async move { commands::cluster::cluster_reset(self, mode).await } } /// This command sets a specific config epoch in a fresh node. /// /// - fn cluster_set_config_epoch(&self, epoch: u64) -> impl Future> + Send { + fn cluster_set_config_epoch(&self, epoch: u64) -> impl Future> + Send { async move { commands::cluster::cluster_set_config_epoch(self, epoch).await } } /// CLUSTER SETSLOT is responsible for changing the state of a hash slot in the receiving node in different ways. /// /// - fn cluster_setslot(&self, slot: u16, state: ClusterSetSlotState) -> impl Future> + Send { + fn cluster_setslot(&self, slot: u16, state: ClusterSetSlotState) -> impl Future> + Send { async move { commands::cluster::cluster_setslot(self, slot, state).await } } } diff --git a/src/commands/interfaces/config.rs b/src/commands/interfaces/config.rs index f483519e..5c0d7df5 100644 --- a/src/commands/interfaces/config.rs +++ b/src/commands/interfaces/config.rs @@ -1,8 +1,8 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, - types::{FromRedis, RedisValue}, + error::Error, + interfaces::{ClientLike, FredResult}, + types::{FromValue, Value}, }; use bytes_utils::Str; use fred_macros::rm_send_if; @@ -15,7 +15,7 @@ pub trait ConfigInterface: ClientLike + Sized { /// Resets the statistics reported by Redis using the INFO command. /// /// - fn config_resetstat(&self) -> impl Future> + Send { + fn config_resetstat(&self) -> impl Future> + Send { async move { commands::config::config_resetstat(self).await } } @@ -24,16 +24,16 @@ pub trait ConfigInterface: ClientLike + Sized { /// compared to the original one because of the use of the CONFIG SET command. 
/// /// - fn config_rewrite(&self) -> impl Future> + Send { + fn config_rewrite(&self) -> impl Future> + Send { async move { commands::config::config_rewrite(self).await } } /// The CONFIG GET command is used to read the configuration parameters of a running Redis server. /// /// - fn config_get(&self, parameter: S) -> impl Future> + Send + fn config_get(&self, parameter: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -45,11 +45,11 @@ pub trait ConfigInterface: ClientLike + Sized { /// The CONFIG SET command is used in order to reconfigure the server at run time without the need to restart Redis. /// /// - fn config_set(&self, parameter: P, value: V) -> impl Future> + Send + fn config_set(&self, parameter: P, value: V) -> impl Future> + Send where P: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(parameter); diff --git a/src/commands/interfaces/geo.rs b/src/commands/interfaces/geo.rs index 7ca69b62..c34b95ba 100644 --- a/src/commands/interfaces/geo.rs +++ b/src/commands/interfaces/geo.rs @@ -1,18 +1,16 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, + error::Error, + interfaces::{ClientLike, FredResult}, types::{ + geo::{GeoPosition, GeoUnit, MultipleGeoValues}, Any, - FromRedis, - GeoPosition, - GeoUnit, - MultipleGeoValues, + FromValue, + Key, MultipleValues, - RedisKey, - RedisValue, SetOptions, SortOrder, + Value, }, }; use fred_macros::rm_send_if; @@ -31,10 +29,10 @@ pub trait GeoInterface: ClientLike + Sized { options: Option, changed: bool, values: V, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: Into + Send, { async move { @@ -49,12 +47,12 @@ pub trait GeoInterface: ClientLike + Sized { /// representing a geospatial index (where elements were added using GEOADD). 
/// /// - fn geohash(&self, key: K, members: V) -> impl Future> + Send + fn geohash(&self, key: K, members: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); @@ -66,15 +64,15 @@ pub trait GeoInterface: ClientLike + Sized { /// Return the positions (longitude,latitude) of all the specified members of the geospatial index represented by /// the sorted set at key. /// - /// Callers can use [as_geo_position](crate::types::RedisValue::as_geo_position) to lazily parse results as needed. + /// Callers can use [as_geo_position](crate::types::Value::as_geo_position) to lazily parse results as needed. /// /// - fn geopos(&self, key: K, members: V) -> impl Future> + Send + fn geopos(&self, key: K, members: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); @@ -92,14 +90,14 @@ pub trait GeoInterface: ClientLike + Sized { src: S, dest: D, unit: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - S: TryInto + Send, - S::Error: Into + Send, - D: TryInto + Send, - D::Error: Into + Send, + R: FromValue, + K: Into + Send, + S: TryInto + Send, + S::Error: Into + Send, + D: TryInto + Send, + D::Error: Into + Send, { async move { into!(key); @@ -123,12 +121,12 @@ pub trait GeoInterface: ClientLike + Sized { withhash: bool, count: Option<(u64, Any)>, ord: Option, - store: Option, - storedist: Option, - ) -> impl Future> + Send + store: Option, + storedist: Option, + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -157,14 +155,14 @@ pub trait GeoInterface: ClientLike + Sized { withhash: bool, count: Option<(u64, Any)>, ord: Option, - store: Option, - storedist: 
Option, - ) -> impl Future> + Send + store: Option, + storedist: Option, + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -195,7 +193,7 @@ pub trait GeoInterface: ClientLike + Sized { fn geosearch( &self, key: K, - from_member: Option, + from_member: Option, from_lonlat: Option, by_radius: Option<(f64, GeoUnit)>, by_box: Option<(f64, f64, GeoUnit)>, @@ -204,10 +202,10 @@ pub trait GeoInterface: ClientLike + Sized { withcoord: bool, withdist: bool, withhash: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -237,18 +235,18 @@ pub trait GeoInterface: ClientLike + Sized { &self, dest: D, source: S, - from_member: Option, + from_member: Option, from_lonlat: Option, by_radius: Option<(f64, GeoUnit)>, by_box: Option<(f64, f64, GeoUnit)>, ord: Option, count: Option<(u64, Any)>, storedist: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - D: Into + Send, - S: Into + Send, + R: FromValue, + D: Into + Send, + S: Into + Send, { async move { into!(dest, source); diff --git a/src/commands/interfaces/hashes.rs b/src/commands/interfaces/hashes.rs index 9fdd1d56..c2ae7e6b 100644 --- a/src/commands/interfaces/hashes.rs +++ b/src/commands/interfaces/hashes.rs @@ -1,23 +1,26 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, - types::{ExpireOptions, FromRedis, MultipleKeys, RedisKey, RedisMap, RedisValue}, + error::Error, + interfaces::{ClientLike, FredResult}, + types::{FromValue, Key, Map, MultipleKeys, Value}, }; use fred_macros::rm_send_if; use futures::Future; use std::convert::TryInto; +#[cfg(feature = "i-hexpire")] +use crate::types::ExpireOptions; + /// Functions that implement the [hashes](https://redis.io/commands#hashes) 
interface. #[rm_send_if(feature = "glommio")] pub trait HashesInterface: ClientLike + Sized { /// Returns all fields and values of the hash stored at `key`. /// /// - fn hgetall(&self, key: K) -> impl Future> + Send + fn hgetall(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -28,10 +31,10 @@ pub trait HashesInterface: ClientLike + Sized { /// Removes the specified fields from the hash stored at `key`. /// /// - fn hdel(&self, key: K, fields: F) -> impl Future> + Send + fn hdel(&self, key: K, fields: F) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: Into + Send, { async move { @@ -43,11 +46,11 @@ pub trait HashesInterface: ClientLike + Sized { /// Returns if `field` is an existing field in the hash stored at `key`. /// /// - fn hexists(&self, key: K, field: F) -> impl Future> + Send + fn hexists(&self, key: K, field: F) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - F: Into + Send, + R: FromValue, + K: Into + Send, + F: Into + Send, { async move { into!(key, field); @@ -58,11 +61,11 @@ pub trait HashesInterface: ClientLike + Sized { /// Returns the value associated with `field` in the hash stored at `key`. /// /// - fn hget(&self, key: K, field: F) -> impl Future> + Send + fn hget(&self, key: K, field: F) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - F: Into + Send, + R: FromValue, + K: Into + Send, + F: Into + Send, { async move { into!(key, field); @@ -73,11 +76,11 @@ pub trait HashesInterface: ClientLike + Sized { /// Increments the number stored at `field` in the hash stored at `key` by `increment`. 
/// /// - fn hincrby(&self, key: K, field: F, increment: i64) -> impl Future> + Send + fn hincrby(&self, key: K, field: F, increment: i64) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - F: Into + Send, + R: FromValue, + K: Into + Send, + F: Into + Send, { async move { into!(key, field); @@ -89,11 +92,11 @@ pub trait HashesInterface: ClientLike + Sized { /// specified `increment`. /// /// - fn hincrbyfloat(&self, key: K, field: F, increment: f64) -> impl Future> + Send + fn hincrbyfloat(&self, key: K, field: F, increment: f64) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - F: Into + Send, + R: FromValue, + K: Into + Send, + F: Into + Send, { async move { into!(key, field); @@ -106,10 +109,10 @@ pub trait HashesInterface: ClientLike + Sized { /// Returns all field names in the hash stored at `key`. /// /// - fn hkeys(&self, key: K) -> impl Future> + Send + fn hkeys(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -120,10 +123,10 @@ pub trait HashesInterface: ClientLike + Sized { /// Returns the number of fields contained in the hash stored at `key`. /// /// - fn hlen(&self, key: K) -> impl Future> + Send + fn hlen(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -134,10 +137,10 @@ pub trait HashesInterface: ClientLike + Sized { /// Returns the values associated with the specified `fields` in the hash stored at `key`. /// /// - fn hmget(&self, key: K, fields: F) -> impl Future> + Send + fn hmget(&self, key: K, fields: F) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: Into + Send, { async move { @@ -149,12 +152,12 @@ pub trait HashesInterface: ClientLike + Sized { /// Sets the specified fields to their respective values in the hash stored at `key`. 
/// /// - fn hmset(&self, key: K, values: V) -> impl Future> + Send + fn hmset(&self, key: K, values: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -166,12 +169,12 @@ pub trait HashesInterface: ClientLike + Sized { /// Sets fields in the hash stored at `key` to their provided values. /// /// - fn hset(&self, key: K, values: V) -> impl Future> + Send + fn hset(&self, key: K, values: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -183,13 +186,13 @@ pub trait HashesInterface: ClientLike + Sized { /// Sets `field` in the hash stored at `key` to `value`, only if `field` does not yet exist. /// /// - fn hsetnx(&self, key: K, field: F, value: V) -> impl Future> + Send + fn hsetnx(&self, key: K, field: F, value: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - F: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + F: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key, field); @@ -203,10 +206,10 @@ pub trait HashesInterface: ClientLike + Sized { /// If the provided `count` argument is positive, return an array of distinct fields. /// /// - fn hrandfield(&self, key: K, count: Option<(i64, bool)>) -> impl Future> + Send + fn hrandfield(&self, key: K, count: Option<(i64, bool)>) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -217,11 +220,11 @@ pub trait HashesInterface: ClientLike + Sized { /// Returns the string length of the value associated with `field` in the hash stored at `key`. 
/// /// - fn hstrlen(&self, key: K, field: F) -> impl Future> + Send + fn hstrlen(&self, key: K, field: F) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - F: Into + Send, + R: FromValue, + K: Into + Send, + F: Into + Send, { async move { into!(key, field); @@ -232,10 +235,10 @@ pub trait HashesInterface: ClientLike + Sized { /// Returns all values in the hash stored at `key`. /// /// - fn hvals(&self, key: K) -> impl Future> + Send + fn hvals(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -246,10 +249,12 @@ pub trait HashesInterface: ClientLike + Sized { /// Returns the remaining TTL (time to live) of a hash key's field(s) that have a set expiration. /// /// - fn httl(&self, key: K, fields: F) -> impl Future> + Send + #[cfg(feature = "i-hexpire")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-hexpire")))] + fn httl(&self, key: K, fields: F) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: Into + Send, { async move { @@ -261,16 +266,18 @@ pub trait HashesInterface: ClientLike + Sized { /// Set an expiration (TTL or time to live) on one or more fields of a given hash key. /// /// + #[cfg(feature = "i-hexpire")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-hexpire")))] fn hexpire( &self, key: K, seconds: i64, options: Option, fields: F, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: Into + Send, { async move { @@ -285,16 +292,18 @@ pub trait HashesInterface: ClientLike + Sized { /// TTL (time to live), it takes an absolute Unix timestamp in seconds since Unix epoch. 
/// /// + #[cfg(feature = "i-hexpire")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-hexpire")))] fn hexpire_at( &self, key: K, time: i64, options: Option, fields: F, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: Into + Send, { async move { @@ -308,10 +317,12 @@ pub trait HashesInterface: ClientLike + Sized { /// Returns the absolute Unix timestamp in seconds since Unix epoch at which the given key's field(s) will expire. /// /// - fn hexpire_time(&self, key: K, fields: F) -> impl Future> + Send + #[cfg(feature = "i-hexpire")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-hexpire")))] + fn hexpire_time(&self, key: K, fields: F) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: Into + Send, { async move { @@ -324,10 +335,12 @@ pub trait HashesInterface: ClientLike + Sized { /// milliseconds instead of seconds. /// /// - fn hpttl(&self, key: K, fields: F) -> impl Future> + Send + #[cfg(feature = "i-hexpire")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-hexpire")))] + fn hpttl(&self, key: K, fields: F) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: Into + Send, { async move { @@ -339,16 +352,18 @@ pub trait HashesInterface: ClientLike + Sized { /// This command works like HEXPIRE, but the expiration of a field is specified in milliseconds instead of seconds. /// /// + #[cfg(feature = "i-hexpire")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-hexpire")))] fn hpexpire( &self, key: K, milliseconds: i64, options: Option, fields: F, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: Into + Send, { async move { @@ -363,16 +378,18 @@ pub trait HashesInterface: ClientLike + Sized { /// specified in milliseconds since Unix epoch instead of seconds. 
/// /// + #[cfg(feature = "i-hexpire")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-hexpire")))] fn hpexpire_at( &self, key: K, time: i64, options: Option, fields: F, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: Into + Send, { async move { @@ -387,10 +404,12 @@ pub trait HashesInterface: ClientLike + Sized { /// milliseconds since Unix epoch instead of seconds. /// /// - fn hpexpire_time(&self, key: K, fields: F) -> impl Future> + Send + #[cfg(feature = "i-hexpire")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-hexpire")))] + fn hpexpire_time(&self, key: K, fields: F) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: Into + Send, { async move { @@ -403,10 +422,12 @@ pub trait HashesInterface: ClientLike + Sized { /// expiration set) to persistent (a field that will never expire as no TTL (time to live) is associated). /// /// - fn hpersist(&self, key: K, fields: F) -> impl Future> + Send + #[cfg(feature = "i-hexpire")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-hexpire")))] + fn hpersist(&self, key: K, fields: F) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: Into + Send, { async move { diff --git a/src/commands/interfaces/hyperloglog.rs b/src/commands/interfaces/hyperloglog.rs index 81d34776..639a158e 100644 --- a/src/commands/interfaces/hyperloglog.rs +++ b/src/commands/interfaces/hyperloglog.rs @@ -1,8 +1,8 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, - types::{FromRedis, MultipleKeys, MultipleValues, RedisKey}, + error::Error, + interfaces::{ClientLike, FredResult}, + types::{FromValue, Key, MultipleKeys, MultipleValues}, }; use fred_macros::rm_send_if; use futures::Future; @@ -15,12 +15,12 @@ pub trait HyperloglogInterface: ClientLike + Sized { /// argument. 
/// /// - fn pfadd(&self, key: K, elements: V) -> impl Future> + Send + fn pfadd(&self, key: K, elements: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); @@ -36,9 +36,9 @@ pub trait HyperloglogInterface: ClientLike + Sized { /// internally merging the HyperLogLogs stored at the provided keys into a temporary HyperLogLog. /// /// - fn pfcount(&self, keys: K) -> impl Future> + Send + fn pfcount(&self, keys: K) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -51,10 +51,10 @@ pub trait HyperloglogInterface: ClientLike + Sized { /// observed sets of the source HyperLogLog structures. /// /// - fn pfmerge(&self, dest: D, sources: S) -> impl Future> + Send + fn pfmerge(&self, dest: D, sources: S) -> impl Future> + Send where - R: FromRedis, - D: Into + Send, + R: FromValue, + D: Into + Send, S: Into + Send, { async move { diff --git a/src/commands/interfaces/keys.rs b/src/commands/interfaces/keys.rs index fcbf527d..9a4e79ac 100644 --- a/src/commands/interfaces/keys.rs +++ b/src/commands/interfaces/keys.rs @@ -1,9 +1,10 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, - types::{Expiration, ExpireOptions, FromRedis, MultipleKeys, RedisKey, RedisMap, RedisValue, SetOptions}, + error::Error, + interfaces::{ClientLike, FredResult}, + types::{scan::ScanType, Expiration, ExpireOptions, FromValue, Key, Map, MultipleKeys, SetOptions, Value}, }; +use bytes_utils::Str; use fred_macros::rm_send_if; use futures::Future; use std::convert::TryInto; @@ -13,8 +14,10 @@ use std::convert::TryInto; pub trait KeysInterface: ClientLike + Sized { /// Marks the given keys to be watched for conditional execution of a transaction. /// + /// This should usually be used with an [ExclusivePool](crate::clients::ExclusivePool). 
+ /// /// - fn watch(&self, keys: K) -> impl Future> + Send + fn watch(&self, keys: K) -> impl Future> + Send where K: Into + Send, { @@ -27,16 +30,16 @@ pub trait KeysInterface: ClientLike + Sized { /// Flushes all the previously watched keys for a transaction. /// /// - fn unwatch(&self) -> impl Future> + Send { + fn unwatch(&self) -> impl Future> + Send { async move { commands::keys::unwatch(self).await } } /// Return a random key from the currently selected database. /// /// - fn randomkey(&self) -> impl Future> + Send + fn randomkey(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::keys::randomkey(self).await?.convert() } } @@ -50,11 +53,11 @@ pub trait KeysInterface: ClientLike + Sized { destination: D, db: Option, replace: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - S: Into + Send, - D: Into + Send, + R: FromValue, + S: Into + Send, + D: Into + Send, { async move { into!(source, destination); @@ -67,10 +70,10 @@ pub trait KeysInterface: ClientLike + Sized { /// Serialize the value stored at `key` in a Redis-specific format and return it as bulk string. /// /// - fn dump(&self, key: K) -> impl Future> + Send + fn dump(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -82,10 +85,10 @@ pub trait KeysInterface: ClientLike + Sized { /// returned are: string, list, set, zset, hash and stream. 
/// /// - fn r#type(&self, key: K) -> impl Future> + Send + fn r#type(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -100,15 +103,15 @@ pub trait KeysInterface: ClientLike + Sized { &self, key: K, ttl: i64, - serialized: RedisValue, + serialized: Value, replace: bool, absttl: bool, idletime: Option, frequency: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -130,12 +133,12 @@ pub trait KeysInterface: ClientLike + Sized { expire: Option, options: Option, get: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -151,12 +154,12 @@ pub trait KeysInterface: ClientLike + Sized { /// Note: the command is regarded as deprecated since Redis 2.6.12. /// /// - fn setnx(&self, key: K, value: V) -> impl Future> + Send + fn setnx(&self, key: K, value: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -168,10 +171,10 @@ pub trait KeysInterface: ClientLike + Sized { /// Read a value from the server. /// /// - fn get(&self, key: K) -> impl Future> + Send + fn get(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -184,10 +187,10 @@ pub trait KeysInterface: ClientLike + Sized { /// Note: Command formerly called SUBSTR in Redis verison <=2.0. 
/// /// - fn getrange(&self, key: K, start: usize, end: usize) -> impl Future> + Send + fn getrange(&self, key: K, start: usize, end: usize) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -199,12 +202,12 @@ pub trait KeysInterface: ClientLike + Sized { /// `value`. /// /// - fn setrange(&self, key: K, offset: u32, value: V) -> impl Future> + Send + fn setrange(&self, key: K, offset: u32, value: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -218,12 +221,12 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error if `key` does not hold string value. Returns nil if `key` does not exist. /// /// - fn getset(&self, key: K, value: V) -> impl Future> + Send + fn getset(&self, key: K, value: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -236,10 +239,10 @@ pub trait KeysInterface: ClientLike + Sized { /// deletes the key on success (if and only if the key's value type is a string). /// /// - fn getdel(&self, key: K) -> impl Future> + Send + fn getdel(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -250,10 +253,10 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns the length of the string value stored at key. An error is returned when key holds a non-string value. 
/// /// - fn strlen(&self, key: K) -> impl Future> + Send + fn strlen(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -266,9 +269,9 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns the number of keys removed. /// /// - fn del(&self, keys: K) -> impl Future> + Send + fn del(&self, keys: K) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -282,9 +285,9 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns the number of keys removed. /// /// - fn unlink(&self, keys: K) -> impl Future> + Send + fn unlink(&self, keys: K) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -298,11 +301,11 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error when `source` does not exist. If `destination` exists, it gets overwritten. /// /// - fn rename(&self, source: S, destination: D) -> impl Future> + Send + fn rename(&self, source: S, destination: D) -> impl Future> + Send where - R: FromRedis, - S: Into + Send, - D: Into + Send, + R: FromValue, + S: Into + Send, + D: Into + Send, { async move { into!(source); @@ -316,11 +319,11 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error when `source` does not exist. /// /// - fn renamenx(&self, source: S, destination: D) -> impl Future> + Send + fn renamenx(&self, source: S, destination: D) -> impl Future> + Send where - R: FromRedis, - S: Into + Send, - D: Into + Send, + R: FromValue, + S: Into + Send, + D: Into + Send, { async move { into!(source); @@ -332,12 +335,12 @@ pub trait KeysInterface: ClientLike + Sized { /// Append `value` to `key` if it's a string. 
/// /// - fn append(&self, key: K, value: V) -> impl Future> + Send + fn append(&self, key: K, value: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -350,9 +353,9 @@ pub trait KeysInterface: ClientLike + Sized { /// special value nil is returned. /// /// - fn mget(&self, keys: K) -> impl Future> + Send + fn mget(&self, keys: K) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -364,10 +367,10 @@ pub trait KeysInterface: ClientLike + Sized { /// Sets the given keys to their respective values. /// /// - fn mset(&self, values: V) -> impl Future> + Send + fn mset(&self, values: V) -> impl Future> + Send where - V: TryInto + Send, - V::Error: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { try_into!(values); @@ -379,11 +382,11 @@ pub trait KeysInterface: ClientLike + Sized { /// single key already exists. /// /// - fn msetnx(&self, values: V) -> impl Future> + Send + fn msetnx(&self, values: V) -> impl Future> + Send where - R: FromRedis, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + V: TryInto + Send, + V::Error: Into + Send, { async move { try_into!(values); @@ -397,10 +400,10 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error if the value at key is of the wrong type. /// /// - fn incr(&self, key: K) -> impl Future> + Send + fn incr(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -414,10 +417,10 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error if the value at key is of the wrong type. 
/// /// - fn incr_by(&self, key: K, val: i64) -> impl Future> + Send + fn incr_by(&self, key: K, val: i64) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -432,10 +435,10 @@ pub trait KeysInterface: ClientLike + Sized { /// value. /// /// - fn incr_by_float(&self, key: K, val: f64) -> impl Future> + Send + fn incr_by_float(&self, key: K, val: f64) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -449,10 +452,10 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error if the key contains a value of the wrong type. /// /// - fn decr(&self, key: K) -> impl Future> + Send + fn decr(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -466,10 +469,10 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error if the key contains a value of the wrong type. /// /// - fn decr_by(&self, key: K, val: i64) -> impl Future> + Send + fn decr_by(&self, key: K, val: i64) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -480,10 +483,10 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns the remaining time to live of a key that has a timeout, in seconds. /// /// - fn ttl(&self, key: K) -> impl Future> + Send + fn ttl(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -494,10 +497,10 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns the remaining time to live of a key that has a timeout, in milliseconds. 
/// /// - fn pttl(&self, key: K) -> impl Future> + Send + fn pttl(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -511,10 +514,10 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns a boolean value describing whether the timeout was removed. /// /// - fn persist(&self, key: K) -> impl Future> + Send + fn persist(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -525,38 +528,50 @@ pub trait KeysInterface: ClientLike + Sized { /// Set a timeout on key. After the timeout has expired, the key will be automatically deleted. /// /// - fn expire(&self, key: K, seconds: i64) -> impl Future> + Send + fn expire( + &self, + key: K, + seconds: i64, + options: Option, + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); - commands::keys::expire(self, key, seconds).await?.convert() + commands::keys::expire(self, key, seconds, options).await?.convert() } } /// Set a timeout on a key based on a UNIX timestamp. /// /// - fn expire_at(&self, key: K, timestamp: i64) -> impl Future> + Send + fn expire_at( + &self, + key: K, + timestamp: i64, + options: Option, + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); - commands::keys::expire_at(self, key, timestamp).await?.convert() + commands::keys::expire_at(self, key, timestamp, options) + .await? + .convert() } } /// Returns the absolute Unix timestamp (since January 1, 1970) in seconds at which the given key will expire. 
/// /// - fn expire_time(&self, key: K) -> impl Future> + Send + fn expire_time(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -573,10 +588,10 @@ pub trait KeysInterface: ClientLike + Sized { key: K, milliseconds: i64, options: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -595,10 +610,10 @@ pub trait KeysInterface: ClientLike + Sized { key: K, timestamp: i64, options: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -612,10 +627,10 @@ pub trait KeysInterface: ClientLike + Sized { /// milliseconds instead of seconds. /// /// - fn pexpire_time(&self, key: K) -> impl Future> + Send + fn pexpire_time(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -626,9 +641,9 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns number of keys that exist from the `keys` arguments. /// /// - fn exists(&self, keys: K) -> impl Future> + Send + fn exists(&self, keys: K) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -648,11 +663,11 @@ pub trait KeysInterface: ClientLike + Sized { idx: bool, minmatchlen: Option, withmatchlen: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K1: Into + Send, - K2: Into + Send, + R: FromValue, + K1: Into + Send, + K2: Into + Send, { async move { into!(key1, key2); @@ -661,4 +676,29 @@ pub trait KeysInterface: ClientLike + Sized { .convert() } } + + /// Fetch one page of `SCAN` results with the provided cursor. 
+ /// + /// With a clustered deployment the caller must include a hash tag in the pattern or manually specify the server + /// via [with_cluster_node](crate::clients::Client::with_cluster_node) or + /// [with_options](crate::clients::Client::with_options). fn scan_page( + &self, + cursor: S, + pattern: P, + count: Option, + r#type: Option, + ) -> impl Future> + Send + where + R: FromValue, + S: Into + Send, + P: Into + Send, + { + async move { + into!(cursor, pattern); + commands::scan::scan_page(self, cursor, pattern, count, r#type, None, None) + .await? + .convert() + } + } } diff --git a/src/commands/interfaces/lists.rs b/src/commands/interfaces/lists.rs index 86a7078e..30bf51d0 100644 --- a/src/commands/interfaces/lists.rs +++ b/src/commands/interfaces/lists.rs @@ -1,18 +1,17 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, + error::Error, + interfaces::{ClientLike, FredResult}, types::{ - FromRedis, - LMoveDirection, + lists::{LMoveDirection, ListLocation}, + FromValue, + Key, Limit, - ListLocation, MultipleKeys, MultipleStrings, MultipleValues, - RedisKey, - RedisValue, SortOrder, + Value, }, }; use bytes_utils::Str; @@ -32,9 +31,9 @@ pub trait ListInterface: ClientLike + Sized { keys: K, direction: LMoveDirection, count: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -50,9 +49,9 @@ pub trait ListInterface: ClientLike + Sized { /// that is non-empty, with the given keys being checked in the order that they are given. /// /// - fn blpop(&self, keys: K, timeout: f64) -> impl Future> + Send + fn blpop(&self, keys: K, timeout: f64) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -66,9 +65,9 @@ pub trait ListInterface: ClientLike + Sized { /// that is non-empty, with the given keys being checked in the order that they are given.
/// /// - fn brpop(&self, keys: K, timeout: f64) -> impl Future> + Send + fn brpop(&self, keys: K, timeout: f64) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -80,16 +79,11 @@ pub trait ListInterface: ClientLike + Sized { /// The blocking equivalent of [Self::rpoplpush]. /// /// - fn brpoplpush( - &self, - source: S, - destination: D, - timeout: f64, - ) -> impl Future> + Send + fn brpoplpush(&self, source: S, destination: D, timeout: f64) -> impl Future> + Send where - R: FromRedis, - S: Into + Send, - D: Into + Send, + R: FromValue, + S: Into + Send, + D: Into + Send, { async move { into!(source, destination); @@ -109,11 +103,11 @@ pub trait ListInterface: ClientLike + Sized { source_direction: LMoveDirection, destination_direction: LMoveDirection, timeout: f64, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - S: Into + Send, - D: Into + Send, + R: FromValue, + S: Into + Send, + D: Into + Send, { async move { into!(source, destination); @@ -138,9 +132,9 @@ pub trait ListInterface: ClientLike + Sized { keys: K, direction: LMoveDirection, count: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -152,10 +146,10 @@ pub trait ListInterface: ClientLike + Sized { /// Returns the element at index in the list stored at key. 
/// /// - fn lindex(&self, key: K, index: i64) -> impl Future> + Send + fn lindex(&self, key: K, index: i64) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -172,14 +166,14 @@ pub trait ListInterface: ClientLike + Sized { location: ListLocation, pivot: P, element: V, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - P: TryInto + Send, - P::Error: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + P: TryInto + Send, + P::Error: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -193,10 +187,10 @@ pub trait ListInterface: ClientLike + Sized { /// Returns the length of the list stored at key. /// /// - fn llen(&self, key: K) -> impl Future> + Send + fn llen(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -207,10 +201,10 @@ pub trait ListInterface: ClientLike + Sized { /// Removes and returns the first elements of the list stored at key. /// /// - fn lpop(&self, key: K, count: Option) -> impl Future> + Send + fn lpop(&self, key: K, count: Option) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -228,12 +222,12 @@ pub trait ListInterface: ClientLike + Sized { rank: Option, count: Option, maxlen: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -247,12 +241,12 @@ pub trait ListInterface: ClientLike + Sized { /// Insert all the specified values at the head of the list stored at `key`. 
/// /// - fn lpush(&self, key: K, elements: V) -> impl Future> + Send + fn lpush(&self, key: K, elements: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); @@ -264,12 +258,12 @@ pub trait ListInterface: ClientLike + Sized { /// Inserts specified values at the head of the list stored at `key`, only if `key` already exists and holds a list. /// /// - fn lpushx(&self, key: K, elements: V) -> impl Future> + Send + fn lpushx(&self, key: K, elements: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); @@ -281,10 +275,10 @@ pub trait ListInterface: ClientLike + Sized { /// Returns the specified elements of the list stored at `key`. /// /// - fn lrange(&self, key: K, start: i64, stop: i64) -> impl Future> + Send + fn lrange(&self, key: K, start: i64, stop: i64) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -295,12 +289,12 @@ pub trait ListInterface: ClientLike + Sized { /// Removes the first `count` occurrences of elements equal to `element` from the list stored at `key`. /// /// - fn lrem(&self, key: K, count: i64, element: V) -> impl Future> + Send + fn lrem(&self, key: K, count: i64, element: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -312,12 +306,12 @@ pub trait ListInterface: ClientLike + Sized { /// Sets the list element at `index` to `element`. 
/// /// - fn lset(&self, key: K, index: i64, element: V) -> impl Future> + Send + fn lset(&self, key: K, index: i64, element: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -329,10 +323,10 @@ pub trait ListInterface: ClientLike + Sized { /// Trim an existing list so that it will contain only the specified range of elements specified. /// /// - fn ltrim(&self, key: K, start: i64, stop: i64) -> impl Future> + Send + fn ltrim(&self, key: K, start: i64, stop: i64) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -343,10 +337,10 @@ pub trait ListInterface: ClientLike + Sized { /// Removes and returns the last elements of the list stored at `key`. /// /// - fn rpop(&self, key: K, count: Option) -> impl Future> + Send + fn rpop(&self, key: K, count: Option) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -358,11 +352,11 @@ pub trait ListInterface: ClientLike + Sized { /// the first element (head) of the list stored at `destination`. 
/// /// - fn rpoplpush(&self, source: S, dest: D) -> impl Future> + Send + fn rpoplpush(&self, source: S, dest: D) -> impl Future> + Send where - R: FromRedis, - S: Into + Send, - D: Into + Send, + R: FromValue, + S: Into + Send, + D: Into + Send, { async move { into!(source, dest); @@ -381,11 +375,11 @@ pub trait ListInterface: ClientLike + Sized { dest: D, source_direction: LMoveDirection, dest_direction: LMoveDirection, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - S: Into + Send, - D: Into + Send, + R: FromValue, + S: Into + Send, + D: Into + Send, { async move { into!(source, dest); @@ -398,12 +392,12 @@ pub trait ListInterface: ClientLike + Sized { /// Insert all the specified values at the tail of the list stored at `key`. /// /// - fn rpush(&self, key: K, elements: V) -> impl Future> + Send + fn rpush(&self, key: K, elements: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); @@ -415,12 +409,12 @@ pub trait ListInterface: ClientLike + Sized { /// Inserts specified values at the tail of the list stored at `key`, only if key already exists and holds a list. 
/// /// - fn rpushx(&self, key: K, elements: V) -> impl Future> + Send + fn rpushx(&self, key: K, elements: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); @@ -440,11 +434,11 @@ pub trait ListInterface: ClientLike + Sized { get: S, order: Option, alpha: bool, - store: Option, - ) -> impl Future> + Send + store: Option, + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, S: Into + Send, { async move { @@ -467,10 +461,10 @@ pub trait ListInterface: ClientLike + Sized { get: S, order: Option, alpha: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, S: Into + Send, { async move { diff --git a/src/commands/interfaces/lua.rs b/src/commands/interfaces/lua.rs index f36e088a..718ed9be 100644 --- a/src/commands/interfaces/lua.rs +++ b/src/commands/interfaces/lua.rs @@ -1,8 +1,14 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, - types::{FnPolicy, FromRedis, MultipleKeys, MultipleStrings, MultipleValues, ScriptDebugFlag}, + error::Error, + interfaces::{ClientLike, FredResult}, + types::{ + scripts::{FnPolicy, ScriptDebugFlag}, + FromValue, + MultipleKeys, + MultipleStrings, + MultipleValues, + }, }; use bytes::Bytes; use bytes_utils::Str; @@ -19,9 +25,9 @@ pub trait LuaInterface: ClientLike + Sized { /// Returns the SHA-1 hash of the script. /// /// - fn script_load(&self, script: S) -> impl Future> + Send + fn script_load(&self, script: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -35,9 +41,9 @@ pub trait LuaInterface: ClientLike + Sized { /// Returns the SHA-1 hash of the script. 
#[cfg(feature = "sha-1")] #[cfg_attr(docsrs, doc(cfg(feature = "sha-1")))] - fn script_load_cluster(&self, script: S) -> impl Future> + Send + fn script_load_cluster(&self, script: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -49,35 +55,35 @@ pub trait LuaInterface: ClientLike + Sized { /// Kills the currently executing Lua script, assuming no write operation was yet performed by the script. /// /// - fn script_kill(&self) -> impl Future> + Send { + fn script_kill(&self) -> impl Future> + Send { async move { commands::lua::script_kill(self).await } } /// A clustered variant of the [script_kill](Self::script_kill) command that issues the command to all primary nodes /// in the cluster. - fn script_kill_cluster(&self) -> impl Future> + Send { + fn script_kill_cluster(&self) -> impl Future> + Send { async move { commands::lua::script_kill_cluster(self).await } } /// Flush the Lua scripts cache. /// /// - fn script_flush(&self, r#async: bool) -> impl Future> + Send { + fn script_flush(&self, r#async: bool) -> impl Future> + Send { async move { commands::lua::script_flush(self, r#async).await } } /// A clustered variant of [script_flush](Self::script_flush) that flushes the script cache on all primary nodes in /// the cluster. - fn script_flush_cluster(&self, r#async: bool) -> impl Future> + Send { + fn script_flush_cluster(&self, r#async: bool) -> impl Future> + Send { async move { commands::lua::script_flush_cluster(self, r#async).await } } /// Returns information about the existence of the scripts in the script cache. /// /// - fn script_exists(&self, hashes: H) -> impl Future> + Send + fn script_exists(&self, hashes: H) -> impl Future> + Send where - R: FromRedis, + R: FromValue, H: Into + Send, { async move { @@ -89,7 +95,7 @@ pub trait LuaInterface: ClientLike + Sized { /// Set the debug mode for subsequent scripts executed with EVAL. 
/// /// - fn script_debug(&self, flag: ScriptDebugFlag) -> impl Future> + Send { + fn script_debug(&self, flag: ScriptDebugFlag) -> impl Future> + Send { async move { commands::lua::script_debug(self, flag).await } } @@ -98,13 +104,13 @@ pub trait LuaInterface: ClientLike + Sized { /// /// /// **Note: Use `None` to represent an empty set of keys or args.** - fn evalsha(&self, hash: S, keys: K, args: V) -> impl Future> + Send + fn evalsha(&self, hash: S, keys: K, args: V) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(hash, keys); @@ -118,13 +124,13 @@ pub trait LuaInterface: ClientLike + Sized { /// /// /// **Note: Use `None` to represent an empty set of keys or args.** - fn eval(&self, script: S, keys: K, args: V) -> impl Future> + Send + fn eval(&self, script: S, keys: K, args: V) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(script, keys); @@ -140,13 +146,13 @@ pub trait FunctionInterface: ClientLike + Sized { /// Invoke a function. /// /// - fn fcall(&self, func: F, keys: K, args: V) -> impl Future> + Send + fn fcall(&self, func: F, keys: K, args: V) -> impl Future> + Send where - R: FromRedis, + R: FromValue, F: Into + Send, K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(func); @@ -158,13 +164,13 @@ pub trait FunctionInterface: ClientLike + Sized { /// This is a read-only variant of the FCALL command that cannot execute commands that modify data. 
/// /// - fn fcall_ro(&self, func: F, keys: K, args: V) -> impl Future> + Send + fn fcall_ro(&self, func: F, keys: K, args: V) -> impl Future> + Send where - R: FromRedis, + R: FromValue, F: Into + Send, K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(func); @@ -176,9 +182,9 @@ pub trait FunctionInterface: ClientLike + Sized { /// Delete a library and all its functions. /// /// - fn function_delete(&self, library_name: S) -> impl Future> + Send + fn function_delete(&self, library_name: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -190,7 +196,7 @@ pub trait FunctionInterface: ClientLike + Sized { /// Delete a library and all its functions from each cluster node concurrently. /// /// - fn function_delete_cluster(&self, library_name: S) -> impl Future> + Send + fn function_delete_cluster(&self, library_name: S) -> impl Future> + Send where S: Into + Send, { @@ -203,9 +209,9 @@ pub trait FunctionInterface: ClientLike + Sized { /// Return the serialized payload of loaded libraries. /// /// - fn function_dump(&self) -> impl Future> + Send + fn function_dump(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::lua::function_dump(self).await?.convert() } } @@ -213,9 +219,9 @@ pub trait FunctionInterface: ClientLike + Sized { /// Deletes all the libraries. /// /// - fn function_flush(&self, r#async: bool) -> impl Future> + Send + fn function_flush(&self, r#async: bool) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::lua::function_flush(self, r#async).await?.convert() } } @@ -223,7 +229,7 @@ pub trait FunctionInterface: ClientLike + Sized { /// Deletes all the libraries on all cluster nodes concurrently. 
/// /// - fn function_flush_cluster(&self, r#async: bool) -> impl Future> + Send { + fn function_flush_cluster(&self, r#async: bool) -> impl Future> + Send { async move { commands::lua::function_flush_cluster(self, r#async).await } } @@ -233,9 +239,9 @@ pub trait FunctionInterface: ClientLike + Sized { /// possible. /// /// - fn function_kill(&self) -> impl Future> + Send + fn function_kill(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::lua::function_kill(self).await?.convert() } } @@ -243,13 +249,9 @@ pub trait FunctionInterface: ClientLike + Sized { /// Return information about the functions and libraries. /// /// - fn function_list( - &self, - library_name: Option, - withcode: bool, - ) -> impl Future> + Send + fn function_list(&self, library_name: Option, withcode: bool) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -263,9 +265,9 @@ pub trait FunctionInterface: ClientLike + Sized { /// Load a library to Redis. /// /// - fn function_load(&self, replace: bool, code: S) -> impl Future> + Send + fn function_load(&self, replace: bool, code: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -277,9 +279,9 @@ pub trait FunctionInterface: ClientLike + Sized { /// Load a library to Redis on all cluster nodes concurrently. /// /// - fn function_load_cluster(&self, replace: bool, code: S) -> impl Future> + Send + fn function_load_cluster(&self, replace: bool, code: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -295,12 +297,12 @@ pub trait FunctionInterface: ClientLike + Sized { /// /// /// Note: Use `FnPolicy::default()` to use the default function restore policy (`"APPEND"`). 
- fn function_restore(&self, serialized: B, policy: P) -> impl Future> + Send + fn function_restore(&self, serialized: B, policy: P) -> impl Future> + Send where - R: FromRedis, + R: FromValue, B: Into + Send, P: TryInto + Send, - P::Error: Into + Send, + P::Error: Into + Send, { async move { into!(serialized); @@ -316,11 +318,11 @@ pub trait FunctionInterface: ClientLike + Sized { /// /// /// Note: Use `FnPolicy::default()` to use the default function restore policy (`"APPEND"`). - fn function_restore_cluster(&self, serialized: B, policy: P) -> impl Future> + Send + fn function_restore_cluster(&self, serialized: B, policy: P) -> impl Future> + Send where B: Into + Send, P: TryInto + Send, - P::Error: Into + Send, + P::Error: Into + Send, { async move { into!(serialized); @@ -335,9 +337,9 @@ pub trait FunctionInterface: ClientLike + Sized { /// Note: This command runs on a backchannel connection to the server. /// /// - fn function_stats(&self) -> impl Future> + Send + fn function_stats(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::lua::function_stats(self).await?.convert() } } diff --git a/src/commands/interfaces/memory.rs b/src/commands/interfaces/memory.rs index 98466166..101921ee 100644 --- a/src/commands/interfaces/memory.rs +++ b/src/commands/interfaces/memory.rs @@ -1,8 +1,8 @@ use crate::{ commands, - interfaces::{ClientLike, RedisResult}, - prelude::FromRedis, - types::RedisKey, + interfaces::{ClientLike, FredResult}, + prelude::FromValue, + types::Key, }; use fred_macros::rm_send_if; use futures::Future; @@ -14,9 +14,9 @@ pub trait MemoryInterface: ClientLike + Sized { /// advises about possible remedies. 
/// /// - fn memory_doctor(&self) -> impl Future> + Send + fn memory_doctor(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::memory::memory_doctor(self).await?.convert() } } @@ -24,9 +24,9 @@ pub trait MemoryInterface: ClientLike + Sized { /// The MEMORY MALLOC-STATS command provides an internal statistics report from the memory allocator. /// /// - fn memory_malloc_stats(&self) -> impl Future> + Send + fn memory_malloc_stats(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::memory::memory_malloc_stats(self).await?.convert() } } @@ -34,16 +34,16 @@ pub trait MemoryInterface: ClientLike + Sized { /// The MEMORY PURGE command attempts to purge dirty pages so these can be reclaimed by the allocator. /// /// - fn memory_purge(&self) -> impl Future> + Send { + fn memory_purge(&self) -> impl Future> + Send { async move { commands::memory::memory_purge(self).await } } /// The MEMORY STATS command returns an Array reply about the memory usage of the server. /// /// - fn memory_stats(&self) -> impl Future> + Send + fn memory_stats(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::memory::memory_stats(self).await?.convert() } } @@ -51,10 +51,10 @@ pub trait MemoryInterface: ClientLike + Sized { /// The MEMORY USAGE command reports the number of bytes that a key and its value require to be stored in RAM. 
/// /// - fn memory_usage(&self, key: K, samples: Option) -> impl Future> + Send + fn memory_usage(&self, key: K, samples: Option) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); diff --git a/src/commands/interfaces/pubsub.rs b/src/commands/interfaces/pubsub.rs index deafe879..ecfb7bfd 100644 --- a/src/commands/interfaces/pubsub.rs +++ b/src/commands/interfaces/pubsub.rs @@ -1,8 +1,8 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, - types::{FromRedis, MultipleStrings, RedisValue}, + error::Error, + interfaces::{ClientLike, FredResult}, + types::{FromValue, MultipleStrings, Value}, }; use bytes_utils::Str; use fred_macros::rm_send_if; @@ -15,7 +15,7 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// Subscribe to a channel on the publish-subscribe interface. /// /// - fn subscribe(&self, channels: S) -> impl Future> + Send + fn subscribe(&self, channels: S) -> impl Future> + Send where S: Into + Send, { @@ -28,7 +28,7 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// Unsubscribe from a channel on the PubSub interface. /// /// - fn unsubscribe(&self, channels: S) -> impl Future> + Send + fn unsubscribe(&self, channels: S) -> impl Future> + Send where S: Into + Send, { @@ -41,7 +41,7 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// Subscribes the client to the given patterns. /// /// - fn psubscribe(&self, patterns: S) -> impl Future> + Send + fn psubscribe(&self, patterns: S) -> impl Future> + Send where S: Into + Send, { @@ -56,7 +56,7 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// If no channels are provided this command returns an empty array. 
/// /// - fn punsubscribe(&self, patterns: S) -> impl Future> + Send + fn punsubscribe(&self, patterns: S) -> impl Future> + Send where S: Into + Send, { @@ -69,12 +69,12 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// Publish a message on the PubSub interface, returning the number of clients that received the message. /// /// - fn publish(&self, channel: S, message: V) -> impl Future> + Send + fn publish(&self, channel: S, message: V) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(channel); @@ -86,7 +86,7 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// Subscribes the client to the specified shard channels. /// /// - fn ssubscribe(&self, channels: C) -> impl Future> + Send + fn ssubscribe(&self, channels: C) -> impl Future> + Send where C: Into + Send, { @@ -101,7 +101,7 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// If no channels are provided this command returns an empty array. /// /// - fn sunsubscribe(&self, channels: C) -> impl Future> + Send + fn sunsubscribe(&self, channels: C) -> impl Future> + Send where C: Into + Send, { @@ -114,12 +114,12 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// Posts a message to the given shard channel. /// /// - fn spublish(&self, channel: S, message: V) -> impl Future> + Send + fn spublish(&self, channel: S, message: V) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(channel); @@ -131,9 +131,9 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// Lists the currently active channels. 
/// /// - fn pubsub_channels(&self, pattern: S) -> impl Future> + Send + fn pubsub_channels(&self, pattern: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -145,9 +145,9 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// Returns the number of unique patterns that are subscribed to by clients. /// /// - fn pubsub_numpat(&self) -> impl Future> + Send + fn pubsub_numpat(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::pubsub::pubsub_numpat(self).await?.convert() } } @@ -155,9 +155,9 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// Returns the number of subscribers (exclusive of clients subscribed to patterns) for the specified channels. /// /// - fn pubsub_numsub(&self, channels: S) -> impl Future> + Send + fn pubsub_numsub(&self, channels: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -169,9 +169,9 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// Lists the currently active shard channels. /// /// - fn pubsub_shardchannels(&self, pattern: S) -> impl Future> + Send + fn pubsub_shardchannels(&self, pattern: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -183,9 +183,9 @@ pub trait PubsubInterface: ClientLike + Sized + Send { /// Returns the number of subscribers for the specified shard channels. 
/// /// - fn pubsub_shardnumsub(&self, channels: S) -> impl Future> + Send + fn pubsub_shardnumsub(&self, channels: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { diff --git a/src/commands/interfaces/redis_json.rs b/src/commands/interfaces/redis_json.rs index 75dd3266..ce45ff48 100644 --- a/src/commands/interfaces/redis_json.rs +++ b/src/commands/interfaces/redis_json.rs @@ -1,7 +1,7 @@ use crate::{ commands, - interfaces::{ClientLike, RedisResult}, - types::{FromRedis, MultipleKeys, MultipleStrings, RedisKey, SetOptions}, + interfaces::{ClientLike, FredResult}, + types::{FromValue, Key, MultipleKeys, MultipleStrings, SetOptions}, }; use bytes_utils::Str; use fred_macros::rm_send_if; @@ -28,7 +28,7 @@ use serde_json::Value; /// ```rust /// use fred::{json_quote, prelude::*}; /// use serde_json::json; -/// async fn example(client: &RedisClient) -> Result<(), RedisError> { +/// async fn example(client: &Client) -> Result<(), Error> { /// let _: () = client.json_set("foo", "$", json!(["a", "b"]), None).await?; /// /// // need to double quote string values in this context @@ -51,10 +51,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Append the json values into the array at path after the last element in it. 
/// /// - fn json_arrappend(&self, key: K, path: P, values: Vec) -> impl Future> + Send + fn json_arrappend(&self, key: K, path: P, values: Vec) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, V: Into + Send, { @@ -77,10 +77,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { value: V, start: Option, stop: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, V: Into + Send, { @@ -101,10 +101,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { path: P, index: i64, values: Vec, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, V: Into + Send, { @@ -120,10 +120,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Report the length of the JSON array at path in key. /// /// - fn json_arrlen(&self, key: K, path: Option

) -> impl Future> + Send + fn json_arrlen(&self, key: K, path: Option

) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -141,10 +141,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { key: K, path: Option

, index: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -165,10 +165,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { path: P, start: i64, stop: i64, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -182,10 +182,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Clear container values (arrays/objects) and set numeric values to 0 /// /// - fn json_clear(&self, key: K, path: Option

) -> impl Future> + Send + fn json_clear(&self, key: K, path: Option

) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -198,10 +198,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Report a value's memory usage in bytes /// /// - fn json_debug_memory(&self, key: K, path: Option

) -> impl Future> + Send + fn json_debug_memory(&self, key: K, path: Option

) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -216,10 +216,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Delete a value. /// /// - fn json_del(&self, key: K, path: P) -> impl Future> + Send + fn json_del(&self, key: K, path: P) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -238,10 +238,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { newline: Option, space: Option, paths: P, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, I: Into + Send, N: Into + Send, S: Into + Send, @@ -261,10 +261,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Merge a given JSON value into matching paths. /// /// - fn json_merge(&self, key: K, path: P, value: V) -> impl Future> + Send + fn json_merge(&self, key: K, path: P, value: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, V: Into + Send, { @@ -279,9 +279,9 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Return the values at path from multiple key arguments. /// /// - fn json_mget(&self, keys: K, path: P) -> impl Future> + Send + fn json_mget(&self, keys: K, path: P) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, P: Into + Send, { @@ -294,10 +294,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Set or update one or more JSON values according to the specified key-path-value triplets. 
/// /// - fn json_mset(&self, values: Vec<(K, P, V)>) -> impl Future> + Send + fn json_mset(&self, values: Vec<(K, P, V)>) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, V: Into + Send, { @@ -313,10 +313,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Increment the number value stored at path by number /// /// - fn json_numincrby(&self, key: K, path: P, value: V) -> impl Future> + Send + fn json_numincrby(&self, key: K, path: P, value: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, V: Into + Send, { @@ -331,10 +331,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Return the keys in the object that's referenced by path. /// /// - fn json_objkeys(&self, key: K, path: Option

) -> impl Future> + Send + fn json_objkeys(&self, key: K, path: Option

) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -347,10 +347,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Report the number of keys in the JSON object at path in key. /// /// - fn json_objlen(&self, key: K, path: Option

) -> impl Future> + Send + fn json_objlen(&self, key: K, path: Option

) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -363,10 +363,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Return the JSON in key in Redis serialization protocol specification form. /// /// - fn json_resp(&self, key: K, path: Option

) -> impl Future> + Send + fn json_resp(&self, key: K, path: Option

) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -385,10 +385,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { path: P, value: V, options: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, V: Into + Send, { @@ -408,10 +408,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { key: K, path: Option

, value: V, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, V: Into + Send, { @@ -427,10 +427,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Report the length of the JSON String at path in key. /// /// - fn json_strlen(&self, key: K, path: Option

) -> impl Future> + Send + fn json_strlen(&self, key: K, path: Option

) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -443,10 +443,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Toggle a Boolean value stored at path. /// /// - fn json_toggle(&self, key: K, path: P) -> impl Future> + Send + fn json_toggle(&self, key: K, path: P) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -458,10 +458,10 @@ pub trait RedisJsonInterface: ClientLike + Sized { /// Report the type of JSON value at path. /// /// - fn json_type(&self, key: K, path: Option

) -> impl Future> + Send + fn json_type(&self, key: K, path: Option

) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { diff --git a/src/commands/interfaces/redisearch.rs b/src/commands/interfaces/redisearch.rs index 3639585b..32e7caaf 100644 --- a/src/commands/interfaces/redisearch.rs +++ b/src/commands/interfaces/redisearch.rs @@ -1,18 +1,20 @@ use crate::{ commands, - interfaces::{ClientLike, RedisResult}, - prelude::RedisError, + interfaces::{ClientLike, FredResult}, + prelude::Error, types::{ - FromRedis, - FtAggregateOptions, - FtAlterOptions, - FtCreateOptions, - FtSearchOptions, + redisearch::{ + FtAggregateOptions, + FtAlterOptions, + FtCreateOptions, + FtSearchOptions, + SearchSchema, + SpellcheckTerms, + }, + FromValue, + Key, MultipleStrings, - RedisKey, - RedisValue, - SearchSchema, - SpellcheckTerms, + Value, }, }; use bytes::Bytes; @@ -27,9 +29,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Returns a list of all existing indexes. /// /// - fn ft_list(&self) -> impl Future> + Send + fn ft_list(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::redisearch::ft_list(self).await?.convert() } } @@ -42,9 +44,9 @@ pub trait RediSearchInterface: ClientLike + Sized { index: I, query: Q, options: FtAggregateOptions, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, Q: Into + Send, { @@ -66,9 +68,9 @@ pub trait RediSearchInterface: ClientLike + Sized { index: I, query: Q, options: FtSearchOptions, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, Q: Into + Send, { @@ -88,9 +90,9 @@ pub trait RediSearchInterface: ClientLike + Sized { index: I, options: FtCreateOptions, schema: Vec, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, { async move { @@ -104,9 +106,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Add a 
new attribute to the index. /// /// - fn ft_alter(&self, index: I, options: FtAlterOptions) -> impl Future> + Send + fn ft_alter(&self, index: I, options: FtAlterOptions) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, { async move { @@ -118,9 +120,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Add an alias to an index. /// /// - fn ft_aliasadd(&self, alias: A, index: I) -> impl Future> + Send + fn ft_aliasadd(&self, alias: A, index: I) -> impl Future> + Send where - R: FromRedis, + R: FromValue, A: Into + Send, I: Into + Send, { @@ -133,9 +135,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Remove an alias from an index. /// /// - fn ft_aliasdel(&self, alias: A) -> impl Future> + Send + fn ft_aliasdel(&self, alias: A) -> impl Future> + Send where - R: FromRedis, + R: FromValue, A: Into + Send, { async move { @@ -148,9 +150,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// alias association with the previous index. /// /// - fn ft_aliasupdate(&self, alias: A, index: I) -> impl Future> + Send + fn ft_aliasupdate(&self, alias: A, index: I) -> impl Future> + Send where - R: FromRedis, + R: FromValue, A: Into + Send, I: Into + Send, { @@ -165,9 +167,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Retrieve configuration options. /// /// - fn ft_config_get(&self, option: S) -> impl Future> + Send + fn ft_config_get(&self, option: S) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, { async move { @@ -179,12 +181,12 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Set the value of a RediSearch configuration parameter. 
/// /// - fn ft_config_set(&self, option: S, value: V) -> impl Future> + Send + fn ft_config_set(&self, option: S, value: V) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(option); @@ -198,12 +200,12 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Delete a cursor. /// /// - fn ft_cursor_del(&self, index: I, cursor: C) -> impl Future> + Send + fn ft_cursor_del(&self, index: I, cursor: C) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, - C: TryInto + Send, - C::Error: Into + Send, + C: TryInto + Send, + C::Error: Into + Send, { async move { into!(index); @@ -222,12 +224,12 @@ pub trait RediSearchInterface: ClientLike + Sized { index: I, cursor: C, count: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, - C: TryInto + Send, - C::Error: Into + Send, + C: TryInto + Send, + C::Error: Into + Send, { async move { into!(index); @@ -241,9 +243,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Add terms to a dictionary. /// /// - fn ft_dictadd(&self, dict: D, terms: S) -> impl Future> + fn ft_dictadd(&self, dict: D, terms: S) -> impl Future> where - R: FromRedis, + R: FromValue, D: Into + Send, S: Into + Send, { @@ -256,9 +258,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Remove terms from a dictionary. /// /// - fn ft_dictdel(&self, dict: D, terms: S) -> impl Future> + fn ft_dictdel(&self, dict: D, terms: S) -> impl Future> where - R: FromRedis, + R: FromValue, D: Into + Send, S: Into + Send, { @@ -271,9 +273,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Dump all terms in the given dictionary. 
/// /// - fn ft_dictdump(&self, dict: D) -> impl Future> + fn ft_dictdump(&self, dict: D) -> impl Future> where - R: FromRedis, + R: FromValue, D: Into + Send, { async move { @@ -285,9 +287,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Delete an index. /// /// - fn ft_dropindex(&self, index: I, dd: bool) -> impl Future> + Send + fn ft_dropindex(&self, index: I, dd: bool) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, { async move { @@ -304,9 +306,9 @@ pub trait RediSearchInterface: ClientLike + Sized { index: I, query: Q, dialect: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, Q: Into + Send, { @@ -321,9 +323,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Return information and statistics on the index. /// /// - fn ft_info(&self, index: I) -> impl Future> + Send + fn ft_info(&self, index: I) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, { async move { @@ -342,9 +344,9 @@ pub trait RediSearchInterface: ClientLike + Sized { distance: Option, terms: Option, dialect: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, Q: Into + Send, { @@ -366,10 +368,10 @@ pub trait RediSearchInterface: ClientLike + Sized { score: f64, incr: bool, payload: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, S: Into + Send, { async move { @@ -383,10 +385,10 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Delete a string from a suggestion index. 
/// /// - fn ft_sugdel(&self, key: K, string: S) -> impl Future> + Send + fn ft_sugdel(&self, key: K, string: S) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, S: Into + Send, { async move { @@ -406,10 +408,10 @@ pub trait RediSearchInterface: ClientLike + Sized { withscores: bool, withpayloads: bool, max: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, P: Into + Send, { async move { @@ -423,10 +425,10 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Get the size of an auto-complete suggestion dictionary. /// /// - fn ft_suglen(&self, key: K) -> impl Future> + Send + fn ft_suglen(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -437,9 +439,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Dump the contents of a synonym group. /// /// - fn ft_syndump(&self, index: I) -> impl Future> + Send + fn ft_syndump(&self, index: I) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, { async move { @@ -457,9 +459,9 @@ pub trait RediSearchInterface: ClientLike + Sized { synonym_group_id: S, skipinitialscan: bool, terms: T, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, S: Into + Send, T: Into + Send, @@ -475,9 +477,9 @@ pub trait RediSearchInterface: ClientLike + Sized { /// Return a distinct set of values indexed in a Tag field. 
/// /// - fn ft_tagvals(&self, index: I, field_name: F) -> impl Future> + Send + fn ft_tagvals(&self, index: I, field_name: F) -> impl Future> + Send where - R: FromRedis, + R: FromValue, I: Into + Send, F: Into + Send, { diff --git a/src/commands/interfaces/sentinel.rs b/src/commands/interfaces/sentinel.rs index d42c1fb9..cee175db 100644 --- a/src/commands/interfaces/sentinel.rs +++ b/src/commands/interfaces/sentinel.rs @@ -1,8 +1,8 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, - types::{FromRedis, RedisMap, RedisValue, SentinelFailureKind}, + error::Error, + interfaces::{ClientLike, FredResult}, + types::{FromValue, Map, SentinelFailureKind, Value}, }; use bytes_utils::Str; use fred_macros::rm_send_if; @@ -14,9 +14,9 @@ use std::{convert::TryInto, net::IpAddr}; pub trait SentinelInterface: ClientLike + Sized { /// Check if the current Sentinel configuration is able to reach the quorum needed to failover a master, and the /// majority needed to authorize the failover. - fn ckquorum(&self, name: N) -> impl Future> + Send + fn ckquorum(&self, name: N) -> impl Future> + Send where - R: FromRedis, + R: FromValue, N: Into + Send, { async move { @@ -26,17 +26,17 @@ pub trait SentinelInterface: ClientLike + Sized { } /// Force Sentinel to rewrite its configuration on disk, including the current Sentinel state. - fn flushconfig(&self) -> impl Future> + Send + fn flushconfig(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::sentinel::flushconfig(self).await?.convert() } } /// Force a failover as if the master was not reachable, and without asking for agreement to other Sentinels. - fn failover(&self, name: N) -> impl Future> + Send + fn failover(&self, name: N) -> impl Future> + Send where - R: FromRedis, + R: FromValue, N: Into + Send, { async move { @@ -46,9 +46,9 @@ pub trait SentinelInterface: ClientLike + Sized { } /// Return the ip and port number of the master with that name. 
- fn get_master_addr_by_name(&self, name: N) -> impl Future> + Send + fn get_master_addr_by_name(&self, name: N) -> impl Future> + Send where - R: FromRedis, + R: FromValue, N: Into + Send, { async move { @@ -58,17 +58,17 @@ pub trait SentinelInterface: ClientLike + Sized { } /// Return cached INFO output from masters and replicas. - fn info_cache(&self) -> impl Future> + Send + fn info_cache(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::sentinel::info_cache(self).await?.convert() } } /// Show the state and info of the specified master. - fn master(&self, name: N) -> impl Future> + Send + fn master(&self, name: N) -> impl Future> + Send where - R: FromRedis, + R: FromValue, N: Into + Send, { async move { @@ -78,9 +78,9 @@ pub trait SentinelInterface: ClientLike + Sized { } /// Show a list of monitored masters and their state. - fn masters(&self) -> impl Future> + Send + fn masters(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::sentinel::masters(self).await?.convert() } } @@ -88,9 +88,9 @@ pub trait SentinelInterface: ClientLike + Sized { /// Start Sentinel's monitoring. /// /// - fn monitor(&self, name: N, ip: IpAddr, port: u16, quorum: u32) -> impl Future> + Send + fn monitor(&self, name: N, ip: IpAddr, port: u16, quorum: u32) -> impl Future> + Send where - R: FromRedis, + R: FromValue, N: Into + Send, { async move { @@ -102,17 +102,17 @@ pub trait SentinelInterface: ClientLike + Sized { } /// Return the ID of the Sentinel instance. - fn myid(&self) -> impl Future> + Send + fn myid(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::sentinel::myid(self).await?.convert() } } /// This command returns information about pending scripts. 
- fn pending_scripts(&self) -> impl Future> + Send + fn pending_scripts(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::sentinel::pending_scripts(self).await?.convert() } } @@ -120,9 +120,9 @@ pub trait SentinelInterface: ClientLike + Sized { /// Stop Sentinel's monitoring. /// /// - fn remove(&self, name: N) -> impl Future> + Send + fn remove(&self, name: N) -> impl Future> + Send where - R: FromRedis, + R: FromValue, N: Into + Send, { async move { @@ -132,9 +132,9 @@ pub trait SentinelInterface: ClientLike + Sized { } /// Show a list of replicas for this master, and their state. - fn replicas(&self, name: N) -> impl Future> + Send + fn replicas(&self, name: N) -> impl Future> + Send where - R: FromRedis, + R: FromValue, N: Into + Send, { async move { @@ -144,9 +144,9 @@ pub trait SentinelInterface: ClientLike + Sized { } /// Show a list of sentinel instances for this master, and their state. - fn sentinels(&self, name: N) -> impl Future> + Send + fn sentinels(&self, name: N) -> impl Future> + Send where - R: FromRedis, + R: FromValue, N: Into + Send, { async move { @@ -158,12 +158,12 @@ pub trait SentinelInterface: ClientLike + Sized { /// Set Sentinel's monitoring configuration. /// /// - fn set(&self, name: N, args: V) -> impl Future> + Send + fn set(&self, name: N, args: V) -> impl Future> + Send where - R: FromRedis, + R: FromValue, N: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(name); @@ -173,17 +173,17 @@ pub trait SentinelInterface: ClientLike + Sized { } /// This command simulates different Sentinel crash scenarios. 
- fn simulate_failure(&self, kind: SentinelFailureKind) -> impl Future> + Send + fn simulate_failure(&self, kind: SentinelFailureKind) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::sentinel::simulate_failure(self, kind).await?.convert() } } /// This command will reset all the masters with matching name. - fn reset(&self, pattern: P) -> impl Future> + Send + fn reset(&self, pattern: P) -> impl Future> + Send where - R: FromRedis, + R: FromValue, P: Into + Send, { async move { @@ -194,9 +194,9 @@ pub trait SentinelInterface: ClientLike + Sized { /// Get the current value of a global Sentinel configuration parameter. The specified name may be a wildcard, /// similar to the Redis CONFIG GET command. - fn config_get(&self, name: K) -> impl Future> + Send + fn config_get(&self, name: K) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -206,12 +206,12 @@ pub trait SentinelInterface: ClientLike + Sized { } /// Set the value of a global Sentinel configuration parameter. - fn config_set(&self, name: K, value: V) -> impl Future> + Send + fn config_set(&self, name: K, value: V) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(name); diff --git a/src/commands/interfaces/server.rs b/src/commands/interfaces/server.rs index 0f5e8166..084c4331 100644 --- a/src/commands/interfaces/server.rs +++ b/src/commands/interfaces/server.rs @@ -1,8 +1,8 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, - types::{FromRedis, Server}, + error::Error, + interfaces::{ClientLike, FredResult}, + types::{config::Server, FromValue, Value}, }; use fred_macros::rm_send_if; use futures::Future; @@ -13,9 +13,9 @@ pub trait ServerInterface: ClientLike { /// Instruct Redis to start an Append Only File rewrite process. 
/// /// - fn bgrewriteaof(&self) -> impl Future> + Send + fn bgrewriteaof(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::server::bgrewriteaof(self).await?.convert() } } @@ -23,9 +23,9 @@ pub trait ServerInterface: ClientLike { /// Save the DB in background. /// /// - fn bgsave(&self) -> impl Future> + Send + fn bgsave(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::server::bgsave(self).await?.convert() } } @@ -33,9 +33,9 @@ pub trait ServerInterface: ClientLike { /// Return the number of keys in the selected database. /// /// - fn dbsize(&self) -> impl Future> + Send + fn dbsize(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::server::dbsize(self).await?.convert() } } @@ -43,8 +43,15 @@ pub trait ServerInterface: ClientLike { /// Select the database this client should use. /// /// - fn select(&self, db: u8) -> impl Future> + Send { - async move { commands::server::select(self, db).await?.convert() } + fn select(&self, index: I) -> impl Future> + Send + where + I: TryInto + Send, + I::Error: Into + Send, + { + async move { + try_into!(index); + commands::server::select(self, index).await?.convert() + } } /// This command will start a coordinated failover between the currently-connected-to master and one of its @@ -57,16 +64,16 @@ pub trait ServerInterface: ClientLike { force: bool, abort: bool, timeout: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { commands::server::failover(self, to, force, abort, timeout).await } } /// Return the UNIX TIME of the last DB save executed with success. 
/// /// - fn lastsave(&self) -> impl Future> + Send + fn lastsave(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::server::lastsave(self).await?.convert() } } @@ -76,9 +83,9 @@ pub trait ServerInterface: ClientLike { /// reached, the command returns even if the specified number of replicas were not yet reached. /// /// - fn wait(&self, numreplicas: i64, timeout: i64) -> impl Future> + Send + fn wait(&self, numreplicas: i64, timeout: i64) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::server::wait(self, numreplicas, timeout).await?.convert() } } diff --git a/src/commands/interfaces/sets.rs b/src/commands/interfaces/sets.rs index 643bc588..470e8808 100644 --- a/src/commands/interfaces/sets.rs +++ b/src/commands/interfaces/sets.rs @@ -1,8 +1,8 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, - types::{FromRedis, MultipleKeys, MultipleValues, RedisKey, RedisValue}, + error::Error, + interfaces::{ClientLike, FredResult}, + types::{FromValue, Key, MultipleKeys, MultipleValues, Value}, }; use fred_macros::rm_send_if; use futures::Future; @@ -14,12 +14,12 @@ pub trait SetsInterface: ClientLike + Sized { /// Add the specified members to the set stored at `key`. /// /// - fn sadd(&self, key: K, members: V) -> impl Future> + Send + fn sadd(&self, key: K, members: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); @@ -31,10 +31,10 @@ pub trait SetsInterface: ClientLike + Sized { /// Returns the set cardinality (number of elements) of the set stored at `key`. 
/// /// - fn scard(&self, key: K) -> impl Future> + Send + fn scard(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -45,9 +45,9 @@ pub trait SetsInterface: ClientLike + Sized { /// Returns the members of the set resulting from the difference between the first set and all the successive sets. /// /// - fn sdiff(&self, keys: K) -> impl Future> + Send + fn sdiff(&self, keys: K) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -59,10 +59,10 @@ pub trait SetsInterface: ClientLike + Sized { /// This command is equal to SDIFF, but instead of returning the resulting set, it is stored in `destination`. /// /// - fn sdiffstore(&self, dest: D, keys: K) -> impl Future> + Send + fn sdiffstore(&self, dest: D, keys: K) -> impl Future> + Send where - R: FromRedis, - D: Into + Send, + R: FromValue, + D: Into + Send, K: Into + Send, { async move { @@ -74,9 +74,9 @@ pub trait SetsInterface: ClientLike + Sized { /// Returns the members of the set resulting from the intersection of all the given sets. /// /// - fn sinter(&self, keys: K) -> impl Future> + Send + fn sinter(&self, keys: K) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -88,10 +88,10 @@ pub trait SetsInterface: ClientLike + Sized { /// This command is equal to SINTER, but instead of returning the resulting set, it is stored in `destination`. /// /// - fn sinterstore(&self, dest: D, keys: K) -> impl Future> + Send + fn sinterstore(&self, dest: D, keys: K) -> impl Future> + Send where - R: FromRedis, - D: Into + Send, + R: FromValue, + D: Into + Send, K: Into + Send, { async move { @@ -103,12 +103,12 @@ pub trait SetsInterface: ClientLike + Sized { /// Returns if `member` is a member of the set stored at `key`. 
/// /// - fn sismember(&self, key: K, member: V) -> impl Future> + Send + fn sismember(&self, key: K, member: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -120,12 +120,12 @@ pub trait SetsInterface: ClientLike + Sized { /// Returns whether each member is a member of the set stored at `key`. /// /// - fn smismember(&self, key: K, members: V) -> impl Future> + Send + fn smismember(&self, key: K, members: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); @@ -137,10 +137,10 @@ pub trait SetsInterface: ClientLike + Sized { /// Returns all the members of the set value stored at `key`. /// /// - fn smembers(&self, key: K) -> impl Future> + Send + fn smembers(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -151,13 +151,13 @@ pub trait SetsInterface: ClientLike + Sized { /// Move `member` from the set at `source` to the set at `destination`. /// /// - fn smove(&self, source: S, dest: D, member: V) -> impl Future> + Send + fn smove(&self, source: S, dest: D, member: V) -> impl Future> + Send where - R: FromRedis, - S: Into + Send, - D: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + S: Into + Send, + D: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(source, dest); @@ -169,10 +169,10 @@ pub trait SetsInterface: ClientLike + Sized { /// Removes and returns one or more random members from the set value store at `key`. 
/// /// - fn spop(&self, key: K, count: Option) -> impl Future> + Send + fn spop(&self, key: K, count: Option) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -186,10 +186,10 @@ pub trait SetsInterface: ClientLike + Sized { /// count or the set's cardinality (SCARD), whichever is lower. /// /// - fn srandmember(&self, key: K, count: Option) -> impl Future> + Send + fn srandmember(&self, key: K, count: Option) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -200,12 +200,12 @@ pub trait SetsInterface: ClientLike + Sized { /// Remove the specified members from the set stored at `key`. /// /// - fn srem(&self, key: K, members: V) -> impl Future> + Send + fn srem(&self, key: K, members: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); @@ -217,9 +217,9 @@ pub trait SetsInterface: ClientLike + Sized { /// Returns the members of the set resulting from the union of all the given sets. /// /// - fn sunion(&self, keys: K) -> impl Future> + Send + fn sunion(&self, keys: K) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -231,10 +231,10 @@ pub trait SetsInterface: ClientLike + Sized { /// This command is equal to SUNION, but instead of returning the resulting set, it is stored in `destination`. 
/// /// - fn sunionstore(&self, dest: D, keys: K) -> impl Future> + Send + fn sunionstore(&self, dest: D, keys: K) -> impl Future> + Send where - R: FromRedis, - D: Into + Send, + R: FromValue, + D: Into + Send, K: Into + Send, { async move { diff --git a/src/commands/interfaces/slowlog.rs b/src/commands/interfaces/slowlog.rs index d02bdbf1..88c50a45 100644 --- a/src/commands/interfaces/slowlog.rs +++ b/src/commands/interfaces/slowlog.rs @@ -1,7 +1,7 @@ use crate::{ commands, - interfaces::{ClientLike, RedisResult}, - types::FromRedis, + interfaces::{ClientLike, FredResult}, + types::FromValue, }; use fred_macros::rm_send_if; use futures::Future; @@ -12,9 +12,9 @@ pub trait SlowlogInterface: ClientLike + Sized { /// This command is used to read the slow queries log. /// /// - fn slowlog_get(&self, count: Option) -> impl Future> + Send + fn slowlog_get(&self, count: Option) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::slowlog::slowlog_get(self, count).await?.convert() } } @@ -22,9 +22,9 @@ pub trait SlowlogInterface: ClientLike + Sized { /// This command is used to read length of the slow queries log. /// /// - fn slowlog_length(&self) -> impl Future> + Send + fn slowlog_length(&self) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::slowlog::slowlog_length(self).await?.convert() } } @@ -32,7 +32,7 @@ pub trait SlowlogInterface: ClientLike + Sized { /// This command is used to reset the slow queries log. 
/// /// - fn slowlog_reset(&self) -> impl Future> + Send { + fn slowlog_reset(&self) -> impl Future> + Send { async move { commands::slowlog::slowlog_reset(self).await } } } diff --git a/src/commands/interfaces/sorted_sets.rs b/src/commands/interfaces/sorted_sets.rs index 2ce8f98f..38a7467c 100644 --- a/src/commands/interfaces/sorted_sets.rs +++ b/src/commands/interfaces/sorted_sets.rs @@ -1,22 +1,16 @@ use crate::{ commands, - error::RedisError, - interfaces::{ClientLike, RedisResult}, + error::Error, + interfaces::{ClientLike, FredResult}, types::{ - AggregateOptions, - FromRedis, + sorted_sets::{AggregateOptions, MultipleWeights, MultipleZaddValues, Ordering, ZCmp, ZRange, ZSort}, + FromValue, + Key, Limit, MultipleKeys, MultipleValues, - MultipleWeights, - MultipleZaddValues, - Ordering, - RedisKey, - RedisValue, SetOptions, - ZCmp, - ZRange, - ZSort, + Value, }, }; use fred_macros::rm_send_if; @@ -35,9 +29,9 @@ pub trait SortedSetsInterface: ClientLike + Sized { keys: K, sort: ZCmp, count: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -51,9 +45,9 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// The blocking variant of [Self::zpopmin]. /// /// - fn bzpopmin(&self, keys: K, timeout: f64) -> impl Future> + Send + fn bzpopmin(&self, keys: K, timeout: f64) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -65,9 +59,9 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// The blocking variant of [Self::zpopmax]. 
/// /// - fn bzpopmax(&self, keys: K, timeout: f64) -> impl Future> + Send + fn bzpopmax(&self, keys: K, timeout: f64) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -87,12 +81,12 @@ pub trait SortedSetsInterface: ClientLike + Sized { changed: bool, incr: bool, values: V, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); @@ -106,10 +100,10 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// Returns the sorted set cardinality (number of elements) of the sorted set stored at `key`. /// /// - fn zcard(&self, key: K) -> impl Future> + Send + fn zcard(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -120,10 +114,10 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// Returns the number of elements in the sorted set at `key` with a score between `min` and `max`. /// /// - fn zcount(&self, key: K, min: f64, max: f64) -> impl Future> + Send + fn zcount(&self, key: K, min: f64, max: f64) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -135,9 +129,9 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// client. /// /// - fn zdiff(&self, keys: K, withscores: bool) -> impl Future> + Send + fn zdiff(&self, keys: K, withscores: bool) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -150,10 +144,10 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// `destination`. 
/// /// - fn zdiffstore(&self, dest: D, keys: K) -> impl Future> + Send + fn zdiffstore(&self, dest: D, keys: K) -> impl Future> + Send where - R: FromRedis, - D: Into + Send, + R: FromValue, + D: Into + Send, K: Into + Send, { async move { @@ -165,12 +159,12 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// Increments the score of `member` in the sorted set stored at `key` by `increment`. /// /// - fn zincrby(&self, key: K, increment: f64, member: V) -> impl Future> + Send + fn zincrby(&self, key: K, increment: f64, member: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -191,9 +185,9 @@ pub trait SortedSetsInterface: ClientLike + Sized { weights: W, aggregate: Option, withscores: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, W: Into + Send, { @@ -215,10 +209,10 @@ pub trait SortedSetsInterface: ClientLike + Sized { keys: K, weights: W, aggregate: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - D: Into + Send, + R: FromValue, + D: Into + Send, K: Into + Send, W: Into + Send, { @@ -235,14 +229,14 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// max. /// /// - fn zlexcount(&self, key: K, min: M, max: N) -> impl Future> + Send + fn zlexcount(&self, key: K, min: M, max: N) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, M: TryInto + Send, - M::Error: Into + Send, + M::Error: Into + Send, N: TryInto + Send, - N::Error: Into + Send, + N::Error: Into + Send, { async move { into!(key); @@ -254,10 +248,10 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// Removes and returns up to count members with the highest scores in the sorted set stored at `key`. 
/// /// - fn zpopmax(&self, key: K, count: Option) -> impl Future> + Send + fn zpopmax(&self, key: K, count: Option) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -268,10 +262,10 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// Removes and returns up to count members with the lowest scores in the sorted set stored at `key`. /// /// - fn zpopmin(&self, key: K, count: Option) -> impl Future> + Send + fn zpopmin(&self, key: K, count: Option) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -283,9 +277,9 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// of key names. /// /// - fn zmpop(&self, keys: K, sort: ZCmp, count: Option) -> impl Future> + Send + fn zmpop(&self, keys: K, sort: ZCmp, count: Option) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, { async move { @@ -297,10 +291,10 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// When called with just the key argument, return a random element from the sorted set value stored at `key`. 
/// /// - fn zrandmember(&self, key: K, count: Option<(i64, bool)>) -> impl Future> + Send + fn zrandmember(&self, key: K, count: Option<(i64, bool)>) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -320,15 +314,15 @@ pub trait SortedSetsInterface: ClientLike + Sized { sort: Option, rev: bool, limit: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - D: Into + Send, - S: Into + Send, + R: FromValue, + D: Into + Send, + S: Into + Send, M: TryInto + Send, - M::Error: Into + Send, + M::Error: Into + Send, N: TryInto + Send, - N::Error: Into + Send, + N::Error: Into + Send, { async move { into!(dest, source); @@ -351,14 +345,14 @@ pub trait SortedSetsInterface: ClientLike + Sized { rev: bool, limit: Option, withscores: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, M: TryInto + Send, - M::Error: Into + Send, + M::Error: Into + Send, N: TryInto + Send, - N::Error: Into + Send, + N::Error: Into + Send, { async move { into!(key); @@ -379,14 +373,14 @@ pub trait SortedSetsInterface: ClientLike + Sized { min: M, max: N, limit: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, M: TryInto + Send, - M::Error: Into + Send, + M::Error: Into + Send, N: TryInto + Send, - N::Error: Into + Send, + N::Error: Into + Send, { async move { into!(key); @@ -407,14 +401,14 @@ pub trait SortedSetsInterface: ClientLike + Sized { max: M, min: N, limit: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, M: TryInto + Send, - M::Error: Into + Send, + M::Error: Into + Send, N: TryInto + Send, - N::Error: Into + Send, + N::Error: Into + Send, { async move { into!(key); @@ -436,14 +430,14 @@ pub trait SortedSetsInterface: ClientLike + 
Sized { max: N, withscores: bool, limit: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, M: TryInto + Send, - M::Error: Into + Send, + M::Error: Into + Send, N: TryInto + Send, - N::Error: Into + Send, + N::Error: Into + Send, { async move { into!(key); @@ -465,14 +459,14 @@ pub trait SortedSetsInterface: ClientLike + Sized { min: N, withscores: bool, limit: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, M: TryInto + Send, - M::Error: Into + Send, + M::Error: Into + Send, N: TryInto + Send, - N::Error: Into + Send, + N::Error: Into + Send, { async move { into!(key); @@ -486,29 +480,31 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// Returns the rank of member in the sorted set stored at `key`, with the scores ordered from low to high. /// /// - fn zrank(&self, key: K, member: V) -> impl Future> + Send + fn zrank(&self, key: K, member: V, withscore: bool) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); try_into!(member); - commands::sorted_sets::zrank(self, key, member).await?.convert() + commands::sorted_sets::zrank(self, key, member, withscore) + .await? + .convert() } } /// Removes the specified members from the sorted set stored at `key`. Non existing members are ignored. /// /// - fn zrem(&self, key: K, members: V) -> impl Future> + Send + fn zrem(&self, key: K, members: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); @@ -522,14 +518,14 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// specified by `min` and `max`. 
/// /// - fn zremrangebylex(&self, key: K, min: M, max: N) -> impl Future> + Send + fn zremrangebylex(&self, key: K, min: M, max: N) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, M: TryInto + Send, - M::Error: Into + Send, + M::Error: Into + Send, N: TryInto + Send, - N::Error: Into + Send, + N::Error: Into + Send, { async move { into!(key); @@ -543,10 +539,10 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// Removes all elements in the sorted set stored at `key` with rank between `start` and `stop`. /// /// - fn zremrangebyrank(&self, key: K, start: i64, stop: i64) -> impl Future> + Send + fn zremrangebyrank(&self, key: K, start: i64, stop: i64) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -559,14 +555,14 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// Removes all elements in the sorted set stored at `key` with a score between `min` and `max`. /// /// - fn zremrangebyscore(&self, key: K, min: M, max: N) -> impl Future> + Send + fn zremrangebyscore(&self, key: K, min: M, max: N) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, M: TryInto + Send, - M::Error: Into + Send, + M::Error: Into + Send, N: TryInto + Send, - N::Error: Into + Send, + N::Error: Into + Send, { async move { into!(key); @@ -586,10 +582,10 @@ pub trait SortedSetsInterface: ClientLike + Sized { start: i64, stop: i64, withscores: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -602,29 +598,31 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// Returns the rank of `member` in the sorted set stored at `key`, with the scores ordered from high to low. 
/// /// - fn zrevrank(&self, key: K, member: V) -> impl Future> + Send + fn zrevrank(&self, key: K, member: V, withscore: bool) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); try_into!(member); - commands::sorted_sets::zrevrank(self, key, member).await?.convert() + commands::sorted_sets::zrevrank(self, key, member, withscore) + .await? + .convert() } } /// Returns the score of `member` in the sorted set at `key`. /// /// - fn zscore(&self, key: K, member: V) -> impl Future> + Send + fn zscore(&self, key: K, member: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - V: TryInto + Send, - V::Error: Into + Send, + R: FromValue, + K: Into + Send, + V: TryInto + Send, + V::Error: Into + Send, { async move { into!(key); @@ -643,9 +641,9 @@ pub trait SortedSetsInterface: ClientLike + Sized { weights: W, aggregate: Option, withscores: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, W: Into + Send, { @@ -666,10 +664,10 @@ pub trait SortedSetsInterface: ClientLike + Sized { keys: K, weights: W, aggregate: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - D: Into + Send, + R: FromValue, + D: Into + Send, K: Into + Send, W: Into + Send, { @@ -684,12 +682,12 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// Returns the scores associated with the specified members in the sorted set stored at `key`. 
/// /// - fn zmscore(&self, key: K, members: V) -> impl Future> + Send + fn zmscore(&self, key: K, members: V) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { async move { into!(key); diff --git a/src/commands/interfaces/streams.rs b/src/commands/interfaces/streams.rs index 8526a057..124a6000 100644 --- a/src/commands/interfaces/streams.rs +++ b/src/commands/interfaces/streams.rs @@ -1,21 +1,15 @@ use crate::{ commands, - interfaces::{ClientLike, RedisResult}, - prelude::RedisError, + interfaces::{ClientLike, FredResult}, + prelude::Error, types::{ - FromRedis, - FromRedisKey, - MultipleIDs, + streams::{MultipleIDs, MultipleOrderedPairs, XCap, XPendingArgs, XReadResponse, XReadValue, XID}, + FromKey, + FromValue, + Key, MultipleKeys, - MultipleOrderedPairs, MultipleStrings, - RedisKey, - RedisValue, - XCap, - XPendingArgs, - XReadResponse, - XReadValue, - XID, + Value, }, }; use bytes_utils::Str; @@ -36,10 +30,10 @@ pub trait StreamsInterface: ClientLike + Sized { /// `key`. /// /// - fn xinfo_consumers(&self, key: K, groupname: S) -> impl Future> + Send + fn xinfo_consumers(&self, key: K, groupname: S) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, S: Into + Send, { async move { @@ -53,10 +47,10 @@ pub trait StreamsInterface: ClientLike + Sized { /// This command returns the list of all consumers groups of the stream stored at `key`. /// /// - fn xinfo_groups(&self, key: K) -> impl Future> + Send + fn xinfo_groups(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -67,10 +61,10 @@ pub trait StreamsInterface: ClientLike + Sized { /// This command returns information about the stream stored at `key`. 
/// /// - fn xinfo_stream(&self, key: K, full: bool, count: Option) -> impl Future> + Send + fn xinfo_stream(&self, key: K, full: bool, count: Option) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -90,15 +84,15 @@ pub trait StreamsInterface: ClientLike + Sized { cap: C, id: I, fields: F, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, I: Into + Send, F: TryInto + Send, - F::Error: Into + Send, + F::Error: Into + Send, C: TryInto + Send, - C::Error: Into + Send, + C::Error: Into + Send, { async move { into!(key, id); @@ -112,12 +106,12 @@ pub trait StreamsInterface: ClientLike + Sized { /// Trims the stream by evicting older entries (entries with lower IDs) if needed. /// /// - fn xtrim(&self, key: K, cap: C) -> impl Future> + Send + fn xtrim(&self, key: K, cap: C) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, C: TryInto + Send, - C::Error: Into + Send, + C::Error: Into + Send, { async move { into!(key); @@ -129,10 +123,10 @@ pub trait StreamsInterface: ClientLike + Sized { /// Removes the specified entries from a stream, and returns the number of entries deleted. 
/// /// - fn xdel(&self, key: K, ids: S) -> impl Future> + Send + fn xdel(&self, key: K, ids: S) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, S: Into + Send, { async move { @@ -151,16 +145,16 @@ pub trait StreamsInterface: ClientLike + Sized { start: S, end: E, count: Option, - ) -> impl Future>>> + Send + ) -> impl Future>>> + Send where - Ri: FromRedis, - Rk: FromRedisKey + Hash + Eq, - Rv: FromRedis, - K: Into + Send, - S: TryInto + Send, - S::Error: Into + Send, - E: TryInto + Send, - E::Error: Into + Send, + Ri: FromValue, + Rk: FromKey + Hash + Eq, + Rv: FromValue, + K: Into + Send, + S: TryInto + Send, + S::Error: Into + Send, + E: TryInto + Send, + E::Error: Into + Send, { async move { into!(key); @@ -184,14 +178,14 @@ pub trait StreamsInterface: ClientLike + Sized { start: S, end: E, count: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - S: TryInto + Send, - S::Error: Into + Send, - E: TryInto + Send, - E::Error: Into + Send, + R: FromValue, + K: Into + Send, + S: TryInto + Send, + S::Error: Into + Send, + E: TryInto + Send, + E::Error: Into + Send, { async move { into!(key); @@ -210,16 +204,16 @@ pub trait StreamsInterface: ClientLike + Sized { end: E, start: S, count: Option, - ) -> impl Future>>> + Send + ) -> impl Future>>> + Send where - Ri: FromRedis, - Rk: FromRedisKey + Hash + Eq, - Rv: FromRedis, - K: Into + Send, - S: TryInto + Send, - S::Error: Into + Send, - E: TryInto + Send, - E::Error: Into + Send, + Ri: FromValue, + Rk: FromKey + Hash + Eq, + Rv: FromValue, + K: Into + Send, + S: TryInto + Send, + S::Error: Into + Send, + E: TryInto + Send, + E::Error: Into + Send, { async move { into!(key); @@ -241,14 +235,14 @@ pub trait StreamsInterface: ClientLike + Sized { end: E, start: S, count: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - S: TryInto + Send, - S::Error: Into + Send, - 
E: TryInto + Send, - E::Error: Into + Send, + R: FromValue, + K: Into + Send, + S: TryInto + Send, + S::Error: Into + Send, + E: TryInto + Send, + E::Error: Into + Send, { async move { into!(key); @@ -262,10 +256,10 @@ pub trait StreamsInterface: ClientLike + Sized { /// Returns the number of entries inside a stream. /// /// - fn xlen(&self, key: K) -> impl Future> + Send + fn xlen(&self, key: K) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -281,12 +275,12 @@ pub trait StreamsInterface: ClientLike + Sized { /// The `XREAD` and `XREADGROUP` commands return values that can be interpreted differently in RESP2 and RESP3 mode. /// In many cases it is also easier to operate on the return values of these functions as a `HashMap`, but /// manually declaring this type can be very verbose. This function will automatically convert the response to the - /// [most common](crate::types::XReadResponse) map representation while also handling the encoding differences - /// between RESP2 and RESP3. + /// [most common](crate::types::streams::XReadResponse) map representation while also handling the encoding + /// differences between RESP2 and RESP3. /// /// ```rust no_run - /// # use fred::{prelude::*, types::XReadResponse}; - /// async fn example(client: RedisClient) -> Result<(), RedisError> { + /// # use fred::{prelude::*, types::streams::XReadResponse}; + /// async fn example(client: Client) -> Result<(), Error> { /// // borrowed from the tests. XREAD and XREADGROUP are very similar. /// let result: XReadResponse = client /// .xreadgroup_map("group1", "consumer1", None, None, false, "foo", ">") @@ -304,7 +298,7 @@ pub trait StreamsInterface: ClientLike + Sized { /// } /// ``` // The underlying issue here isn't so much a semantic difference between RESP2 and RESP3, but rather an assumption - // that went into the logic behind the `FromRedis` trait. 
+ // that went into the logic behind the `FromValue` trait. // // In all other Redis commands that return "maps" in RESP2 (or responses that should be interpreted as maps) a map // is encoded as an array with an even number of elements representing `(key, value)` pairs. @@ -360,12 +354,12 @@ pub trait StreamsInterface: ClientLike + Sized { // 2) "6" // ``` // - // The underlying functions that do the RESP2 vs RESP3 conversion are public for callers as well, so one could use a - // `BTreeMap` instead of a `HashMap` like so: + // If it helps, the underlying functions that do the RESP2 vs RESP3 conversion are public for callers as well, so + // one could use a `BTreeMap` instead of a `HashMap` like so: // // ``` // let value: BTreeMap)>> = client - // .xread::(None, None, "foo", "0") + // .xread::(None, None, "foo", "0") // .await? // .flatten_array_values(2) // .convert()?; @@ -376,12 +370,12 @@ pub trait StreamsInterface: ClientLike + Sized { block: Option, keys: K, ids: I, - ) -> impl Future>> + Send + ) -> impl Future>> + Send where - Rk1: FromRedisKey + Hash + Eq, - Rk2: FromRedis, - Rk3: FromRedisKey + Hash + Eq, - Rv: FromRedis, + Rk1: FromKey + Hash + Eq, + Rk2: FromValue, + Rk3: FromKey + Hash + Eq, + Rv: FromValue, K: Into + Send, I: Into + Send, { @@ -406,9 +400,9 @@ pub trait StreamsInterface: ClientLike + Sized { block: Option, keys: K, ids: I, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, K: Into + Send, I: Into + Send, { @@ -427,10 +421,10 @@ pub trait StreamsInterface: ClientLike + Sized { groupname: S, id: I, mkstream: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, S: Into + Send, I: Into + Send, { @@ -450,10 +444,10 @@ pub trait StreamsInterface: ClientLike + Sized { key: K, groupname: G, consumername: C, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: 
Into + Send, G: Into + Send, C: Into + Send, { @@ -473,10 +467,10 @@ pub trait StreamsInterface: ClientLike + Sized { key: K, groupname: G, consumername: C, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, G: Into + Send, C: Into + Send, { @@ -491,10 +485,10 @@ pub trait StreamsInterface: ClientLike + Sized { /// Completely destroy a consumer group. /// /// - fn xgroup_destroy(&self, key: K, groupname: S) -> impl Future> + Send + fn xgroup_destroy(&self, key: K, groupname: S) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, S: Into + Send, { async move { @@ -506,10 +500,10 @@ pub trait StreamsInterface: ClientLike + Sized { /// Set the last delivered ID for a consumer group. /// /// - fn xgroup_setid(&self, key: K, groupname: S, id: I) -> impl Future> + Send + fn xgroup_setid(&self, key: K, groupname: S, id: I) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, S: Into + Send, I: Into + Send, { @@ -532,8 +526,8 @@ pub trait StreamsInterface: ClientLike + Sized { /// The `XREAD` and `XREADGROUP` commands return values that can be interpreted differently in RESP2 and RESP3 mode. /// In many cases it is also easier to operate on the return values of these functions as a `HashMap`, but /// manually declaring this type can be very verbose. This function will automatically convert the response to the - /// [most common](crate::types::XReadResponse) map representation while also handling the encoding differences - /// between RESP2 and RESP3. + /// [most common](crate::types::streams::XReadResponse) map representation while also handling the encoding + /// differences between RESP2 and RESP3. /// /// See the [xread_map](Self::xread_map) documentation for more information. // See the `xread_map` source docs for more information. 
@@ -546,12 +540,12 @@ pub trait StreamsInterface: ClientLike + Sized { noack: bool, keys: K, ids: I, - ) -> impl Future>> + Send + ) -> impl Future>> + Send where - Rk1: FromRedisKey + Hash + Eq, - Rk2: FromRedis, - Rk3: FromRedisKey + Hash + Eq, - Rv: FromRedis, + Rk1: FromKey + Hash + Eq, + Rk2: FromValue, + Rk3: FromKey + Hash + Eq, + Rv: FromValue, G: Into + Send, C: Into + Send, K: Into + Send, @@ -583,9 +577,9 @@ pub trait StreamsInterface: ClientLike + Sized { noack: bool, keys: K, ids: I, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, G: Into + Send, C: Into + Send, K: Into + Send, @@ -602,10 +596,10 @@ pub trait StreamsInterface: ClientLike + Sized { /// Remove one or more messages from the Pending Entries List (PEL) of a stream consumer group. /// /// - fn xack(&self, key: K, group: G, ids: I) -> impl Future> + Send + fn xack(&self, key: K, group: G, ids: I) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, G: Into + Send, I: Into + Send, { @@ -628,12 +622,12 @@ pub trait StreamsInterface: ClientLike + Sized { retry_count: Option, force: bool, justid: bool, - ) -> impl Future>>> + Send + ) -> impl Future>>> + Send where - Ri: FromRedis, - Rk: FromRedisKey + Hash + Eq, - Rv: FromRedis, - K: Into + Send, + Ri: FromValue, + Rk: FromKey + Hash + Eq, + Rv: FromValue, + K: Into + Send, G: Into + Send, C: Into + Send, I: Into + Send, @@ -676,10 +670,10 @@ pub trait StreamsInterface: ClientLike + Sized { retry_count: Option, force: bool, justid: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, G: Into + Send, C: Into + Send, I: Into + Send, @@ -718,12 +712,12 @@ pub trait StreamsInterface: ClientLike + Sized { start: I, count: Option, justid: bool, - ) -> impl Future>)>> + Send + ) -> impl Future>)>> + Send where - Ri: FromRedis, - Rk: FromRedisKey + Hash + Eq, - Rv: FromRedis, - K: Into + 
Send, + Ri: FromValue, + Rk: FromKey + Hash + Eq, + Rv: FromValue, + K: Into + Send, G: Into + Send, C: Into + Send, I: Into + Send, @@ -751,10 +745,10 @@ pub trait StreamsInterface: ClientLike + Sized { start: I, count: Option, justid: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, G: Into + Send, C: Into + Send, I: Into + Send, @@ -770,10 +764,10 @@ pub trait StreamsInterface: ClientLike + Sized { /// Inspect the list of pending messages in a consumer group. /// /// - fn xpending(&self, key: K, group: G, args: A) -> impl Future> + Send + fn xpending(&self, key: K, group: G, args: A) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, G: Into + Send, A: Into + Send, { diff --git a/src/commands/interfaces/timeseries.rs b/src/commands/interfaces/timeseries.rs index 9e907d27..9fcdeb74 100644 --- a/src/commands/interfaces/timeseries.rs +++ b/src/commands/interfaces/timeseries.rs @@ -1,18 +1,20 @@ use crate::{ commands, interfaces::ClientLike, - prelude::{RedisError, RedisKey, RedisResult}, + prelude::{Error, FredResult, Key}, types::{ - Aggregator, - DuplicatePolicy, - Encoding, - FromRedis, - GetLabels, - GetTimestamp, - GroupBy, - RangeAggregation, - RedisMap, - Timestamp, + timeseries::{ + Aggregator, + DuplicatePolicy, + Encoding, + GetLabels, + GetTimestamp, + GroupBy, + RangeAggregation, + Timestamp, + }, + FromValue, + Map, }, }; use bytes_utils::Str; @@ -36,14 +38,14 @@ pub trait TimeSeriesInterface: ClientLike { chunk_size: Option, on_duplicate: Option, labels: L, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, T: TryInto + Send, - T::Error: Into + Send, - L: TryInto + Send, - L::Error: Into, + T::Error: Into + Send, + L: TryInto + Send, + L::Error: Into, { async move { into!(key); @@ -74,12 +76,12 @@ pub trait TimeSeriesInterface: ClientLike { 
chunk_size: Option, duplicate_policy: Option, labels: L, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - L: TryInto + Send, - L::Error: Into, + R: FromValue, + K: Into + Send, + L: TryInto + Send, + L::Error: Into, { async move { into!(key); @@ -101,12 +103,12 @@ pub trait TimeSeriesInterface: ClientLike { chunk_size: Option, duplicate_policy: Option, labels: L, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - L: TryInto + Send, - L::Error: Into, + R: FromValue, + K: Into + Send, + L: TryInto + Send, + L::Error: Into, { async move { into!(key); @@ -126,11 +128,11 @@ pub trait TimeSeriesInterface: ClientLike { dest: D, aggregation: (Aggregator, u64), align_timestamp: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - S: Into + Send, - D: Into + Send, + R: FromValue, + S: Into + Send, + D: Into + Send, { async move { into!(src, dest); @@ -153,12 +155,12 @@ pub trait TimeSeriesInterface: ClientLike { uncompressed: bool, chunk_size: Option, labels: L, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - L: TryInto + Send, - L::Error: Into + Send, + R: FromValue, + K: Into + Send, + L: TryInto + Send, + L::Error: Into + Send, { async move { into!(key); @@ -181,10 +183,10 @@ pub trait TimeSeriesInterface: ClientLike { /// Delete all samples between two timestamps for a given time series. /// /// - fn ts_del(&self, key: K, from: i64, to: i64) -> impl Future> + Send + fn ts_del(&self, key: K, from: i64, to: i64) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -195,11 +197,11 @@ pub trait TimeSeriesInterface: ClientLike { /// Delete a compaction rule. 
/// /// - fn ts_deleterule(&self, src: S, dest: D) -> impl Future> + Send + fn ts_deleterule(&self, src: S, dest: D) -> impl Future> + Send where - R: FromRedis, - S: Into + Send, - D: Into + Send, + R: FromValue, + S: Into + Send, + D: Into + Send, { async move { into!(src, dest); @@ -210,10 +212,10 @@ pub trait TimeSeriesInterface: ClientLike { /// Get the sample with the highest timestamp from a given time series. /// /// - fn ts_get(&self, key: K, latest: bool) -> impl Future> + Send + fn ts_get(&self, key: K, latest: bool) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -234,12 +236,12 @@ pub trait TimeSeriesInterface: ClientLike { uncompressed: bool, chunk_size: Option, labels: L, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, - L: TryInto + Send, - L::Error: Into + Send, + R: FromValue, + K: Into + Send, + L: TryInto + Send, + L::Error: Into + Send, { async move { into!(key); @@ -262,10 +264,10 @@ pub trait TimeSeriesInterface: ClientLike { /// Return information and statistics for a time series. /// /// - fn ts_info(&self, key: K, debug: bool) -> impl Future> + Send + fn ts_info(&self, key: K, debug: bool) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, { async move { into!(key); @@ -276,10 +278,10 @@ pub trait TimeSeriesInterface: ClientLike { /// Append new samples to one or more time series. /// /// - fn ts_madd(&self, samples: I) -> impl Future> + Send + fn ts_madd(&self, samples: I) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, I: IntoIterator + Send, { async move { @@ -294,8 +296,8 @@ pub trait TimeSeriesInterface: ClientLike { /// Get the sample with the highest timestamp from each time series matching a specific filter. 
/// - /// See [Resp2TimeSeriesValues](crate::types::Resp2TimeSeriesValues) and - /// [Resp3TimeSeriesValues](crate::types::Resp3TimeSeriesValues) for more information. + /// See [Resp2TimeSeriesValues](crate::types::timeseries::Resp2TimeSeriesValues) and + /// [Resp3TimeSeriesValues](crate::types::timeseries::Resp3TimeSeriesValues) for more information. /// /// fn ts_mget( @@ -303,9 +305,9 @@ pub trait TimeSeriesInterface: ClientLike { latest: bool, labels: Option, filters: I, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, L: Into + Send, S: Into + Send, I: IntoIterator + Send, @@ -322,8 +324,8 @@ pub trait TimeSeriesInterface: ClientLike { /// Query a range across multiple time series by filters in the forward direction. /// - /// See [Resp2TimeSeriesValues](crate::types::Resp2TimeSeriesValues) and - /// [Resp3TimeSeriesValues](crate::types::Resp3TimeSeriesValues) for more information. + /// See [Resp2TimeSeriesValues](crate::types::timeseries::Resp2TimeSeriesValues) and + /// [Resp3TimeSeriesValues](crate::types::timeseries::Resp3TimeSeriesValues) for more information. /// /// fn ts_mrange( @@ -338,13 +340,13 @@ pub trait TimeSeriesInterface: ClientLike { aggregation: Option, filters: J, group_by: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, F: TryInto + Send, - F::Error: Into + Send, + F::Error: Into + Send, T: TryInto + Send, - T::Error: Into + Send, + T::Error: Into + Send, S: Into + Send, I: IntoIterator + Send, J: IntoIterator + Send, @@ -374,8 +376,8 @@ pub trait TimeSeriesInterface: ClientLike { /// Query a range across multiple time series by filters in the reverse direction. /// - /// See [Resp2TimeSeriesValues](crate::types::Resp2TimeSeriesValues) and - /// [Resp3TimeSeriesValues](crate::types::Resp3TimeSeriesValues) for more information. 
+ /// See [Resp2TimeSeriesValues](crate::types::timeseries::Resp2TimeSeriesValues) and + /// [Resp3TimeSeriesValues](crate::types::timeseries::Resp3TimeSeriesValues) for more information. /// /// fn ts_mrevrange( @@ -390,13 +392,13 @@ pub trait TimeSeriesInterface: ClientLike { aggregation: Option, filters: J, group_by: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, + R: FromValue, F: TryInto + Send, - F::Error: Into + Send, + F::Error: Into + Send, T: TryInto + Send, - T::Error: Into + Send, + T::Error: Into + Send, S: Into + Send, I: IntoIterator + Send, J: IntoIterator + Send, @@ -427,9 +429,9 @@ pub trait TimeSeriesInterface: ClientLike { /// Get all time series keys matching a filter list. /// /// - fn ts_queryindex(&self, filters: I) -> impl Future> + Send + fn ts_queryindex(&self, filters: I) -> impl Future> + Send where - R: FromRedis, + R: FromValue, S: Into + Send, I: IntoIterator + Send, { @@ -452,14 +454,14 @@ pub trait TimeSeriesInterface: ClientLike { filter_by_value: Option<(i64, i64)>, count: Option, aggregation: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: TryInto + Send, - F::Error: Into + Send, + F::Error: Into + Send, T: TryInto + Send, - T::Error: Into + Send, + T::Error: Into + Send, I: IntoIterator + Send, { async move { @@ -496,14 +498,14 @@ pub trait TimeSeriesInterface: ClientLike { filter_by_value: Option<(i64, i64)>, count: Option, aggregation: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where - R: FromRedis, - K: Into + Send, + R: FromValue, + K: Into + Send, F: TryInto + Send, - F::Error: Into + Send, + F::Error: Into + Send, T: TryInto + Send, - T::Error: Into + Send, + T::Error: Into + Send, I: IntoIterator + Send, { async move { diff --git a/src/commands/interfaces/tracking.rs b/src/commands/interfaces/tracking.rs index 666b4b2a..e5d440e9 100644 --- a/src/commands/interfaces/tracking.rs 
+++ b/src/commands/interfaces/tracking.rs @@ -1,9 +1,9 @@ use crate::{ commands, interfaces::ClientLike, - prelude::RedisResult, + prelude::FredResult, runtime::{spawn, BroadcastReceiver, JoinHandle}, - types::{Invalidation, MultipleStrings}, + types::{client::Invalidation, MultipleStrings}, }; use fred_macros::rm_send_if; use futures::Future; @@ -26,7 +26,7 @@ pub trait TrackingInterface: ClientLike + Sized { optin: bool, optout: bool, noloop: bool, - ) -> impl Future> + Send + ) -> impl Future> + Send where P: Into + Send, { @@ -37,16 +37,16 @@ pub trait TrackingInterface: ClientLike + Sized { } /// Disable client tracking on all connections. - fn stop_tracking(&self) -> impl Future> + Send { + fn stop_tracking(&self) -> impl Future> + Send { async move { commands::tracking::stop_tracking(self).await } } /// Spawn a task that processes invalidation messages from the server. /// /// See [invalidation_rx](Self::invalidation_rx) for a more flexible variation of this function. - fn on_invalidation(&self, func: F) -> JoinHandle> + fn on_invalidation(&self, func: F) -> JoinHandle> where - F: Fn(Invalidation) -> RedisResult<()> + Send + 'static, + F: Fn(Invalidation) -> FredResult<()> + Send + 'static, { let mut invalidation_rx = self.invalidation_rx(); diff --git a/src/error.rs b/src/error.rs index 43eac000..2867060e 100644 --- a/src/error.rs +++ b/src/error.rs @@ -5,7 +5,6 @@ use semver::Error as SemverError; use std::{ borrow::{Borrow, Cow}, convert::Infallible, - error::Error, fmt, io::Error as IoError, num::{ParseFloatError, ParseIntError}, @@ -15,14 +14,16 @@ use std::{ }; use url::ParseError; -/// An enum representing the type of error from Redis. +/// An enum representing the type of error. #[derive(Debug, Clone, Eq, PartialEq)] -pub enum RedisErrorKind { - /// A fatal client configuration error. These errors will shutdown a client and break out of any reconnection +pub enum ErrorKind { + /// A fatal client configuration error. 
These errors will shut down a client and break out of any reconnection /// attempts. Config, /// An authentication error. Auth, + /// An error finding a server that should receive a command. + Routing, /// An IO error with the underlying connection. IO, /// An invalid command, such as trying to perform a `set` command on a client after calling `subscribe`. @@ -72,223 +73,219 @@ pub enum RedisErrorKind { Replica, } -impl RedisErrorKind { +impl ErrorKind { pub fn to_str(&self) -> &'static str { match *self { - RedisErrorKind::Auth => "Authentication Error", - RedisErrorKind::IO => "IO Error", - RedisErrorKind::InvalidArgument => "Invalid Argument", - RedisErrorKind::InvalidCommand => "Invalid Command", - RedisErrorKind::Url => "Url Error", - RedisErrorKind::Protocol => "Protocol Error", - RedisErrorKind::Unknown => "Unknown Error", - RedisErrorKind::Canceled => "Canceled", - RedisErrorKind::Cluster => "Cluster Error", - RedisErrorKind::Timeout => "Timeout Error", + ErrorKind::Auth => "Authentication Error", + ErrorKind::IO => "IO Error", + ErrorKind::Routing => "Routing Error", + ErrorKind::InvalidArgument => "Invalid Argument", + ErrorKind::InvalidCommand => "Invalid Command", + ErrorKind::Url => "Url Error", + ErrorKind::Protocol => "Protocol Error", + ErrorKind::Unknown => "Unknown Error", + ErrorKind::Canceled => "Canceled", + ErrorKind::Cluster => "Cluster Error", + ErrorKind::Timeout => "Timeout Error", #[cfg(any( feature = "enable-native-tls", feature = "enable-rustls", feature = "enable-rustls-ring" ))] - RedisErrorKind::Tls => "TLS Error", - RedisErrorKind::Config => "Config Error", - RedisErrorKind::Parse => "Parse Error", - RedisErrorKind::Sentinel => "Sentinel Error", - RedisErrorKind::NotFound => "Not Found", - RedisErrorKind::Backpressure => "Backpressure", + ErrorKind::Tls => "TLS Error", + ErrorKind::Config => "Config Error", + ErrorKind::Parse => "Parse Error", + ErrorKind::Sentinel => "Sentinel Error", + ErrorKind::NotFound => "Not Found", + 
ErrorKind::Backpressure => "Backpressure", #[cfg(feature = "replicas")] - RedisErrorKind::Replica => "Replica", + ErrorKind::Replica => "Replica", } } } -/// An error from Redis. -pub struct RedisError { +/// An error from the server or client. +#[derive(Debug)] +pub struct Error { /// Details about the specific error condition. details: Cow<'static, str>, /// The kind of error. - kind: RedisErrorKind, + kind: ErrorKind, } -impl Clone for RedisError { +impl Clone for Error { fn clone(&self) -> Self { - RedisError::new(self.kind.clone(), self.details.clone()) + Error::new(self.kind.clone(), self.details.clone()) } } -impl PartialEq for RedisError { +impl PartialEq for Error { fn eq(&self, other: &Self) -> bool { self.kind == other.kind && self.details == other.details } } -impl Eq for RedisError {} +impl Eq for Error {} -impl fmt::Debug for RedisError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Redis Error - kind: {:?}, details: {}", self.kind, self.details) - } -} - -impl fmt::Display for RedisError { +impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}: {}", self.kind.to_str(), self.details) } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(e: RedisProtocolError) -> Self { - RedisError::new(RedisErrorKind::Protocol, format!("{}", e)) + Error::new(ErrorKind::Protocol, format!("{}", e)) } } #[doc(hidden)] -impl From<()> for RedisError { +impl From<()> for Error { fn from(_: ()) -> Self { - RedisError::new(RedisErrorKind::Canceled, "Empty error.") + Error::new(ErrorKind::Canceled, "Empty error.") } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(e: futures::channel::mpsc::SendError) -> Self { - RedisError::new(RedisErrorKind::Unknown, format!("{}", e)) + Error::new(ErrorKind::Unknown, format!("{}", e)) } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(e: tokio::sync::oneshot::error::RecvError) -> Self { - 
RedisError::new(RedisErrorKind::Unknown, format!("{}", e)) + Error::new(ErrorKind::Unknown, format!("{}", e)) } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(e: tokio::sync::broadcast::error::RecvError) -> Self { - RedisError::new(RedisErrorKind::Unknown, format!("{}", e)) + Error::new(ErrorKind::Unknown, format!("{}", e)) } } #[doc(hidden)] -impl From> for RedisError { +impl From> for Error { fn from(e: tokio::sync::broadcast::error::SendError) -> Self { - RedisError::new(RedisErrorKind::Unknown, format!("{}", e)) + Error::new(ErrorKind::Unknown, format!("{}", e)) } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(e: IoError) -> Self { - RedisError::new(RedisErrorKind::IO, format!("{:?}", e)) + Error::new(ErrorKind::IO, format!("{:?}", e)) } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(e: ParseError) -> Self { - RedisError::new(RedisErrorKind::Url, format!("{:?}", e)) + Error::new(ErrorKind::Url, format!("{:?}", e)) } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(_: ParseFloatError) -> Self { - RedisError::new(RedisErrorKind::Parse, "Invalid floating point number.") + Error::new(ErrorKind::Parse, "Invalid floating point number.") } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(_: ParseIntError) -> Self { - RedisError::new(RedisErrorKind::Parse, "Invalid integer string.") + Error::new(ErrorKind::Parse, "Invalid integer string.") } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(_: FromUtf8Error) -> Self { - RedisError::new(RedisErrorKind::Parse, "Invalid UTF-8 string.") + Error::new(ErrorKind::Parse, "Invalid UTF-8 string.") } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(_: Utf8Error) -> Self { - RedisError::new(RedisErrorKind::Parse, "Invalid UTF-8 string.") + Error::new(ErrorKind::Parse, "Invalid UTF-8 string.") } } #[doc(hidden)] -impl From> for RedisError { 
+impl From> for Error { fn from(e: BytesUtf8Error) -> Self { e.utf8_error().into() } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(e: fmt::Error) -> Self { - RedisError::new(RedisErrorKind::Unknown, format!("{:?}", e)) + Error::new(ErrorKind::Unknown, format!("{:?}", e)) } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(e: Canceled) -> Self { - RedisError::new(RedisErrorKind::Canceled, format!("{}", e)) + Error::new(ErrorKind::Canceled, format!("{}", e)) } } #[doc(hidden)] #[cfg(not(feature = "glommio"))] -impl From for RedisError { +impl From for Error { fn from(e: tokio::task::JoinError) -> Self { - RedisError::new(RedisErrorKind::Unknown, format!("Spawn Error: {:?}", e)) + Error::new(ErrorKind::Unknown, format!("Spawn Error: {:?}", e)) } } #[doc(hidden)] #[cfg(feature = "glommio")] -impl From> for RedisError { +impl From> for Error { fn from(e: glommio::GlommioError) -> Self { - RedisError::new(RedisErrorKind::Unknown, format!("{:?}", e)) + Error::new(ErrorKind::Unknown, format!("{:?}", e)) } } #[doc(hidden)] #[cfg(feature = "glommio")] -impl From for RedisError { +impl From for Error { fn from(_: oneshot::RecvError) -> Self { - RedisError::new_canceled() + Error::new_canceled() } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(e: SemverError) -> Self { - RedisError::new(RedisErrorKind::Protocol, format!("Invalid Redis version: {:?}", e)) + Error::new(ErrorKind::Protocol, format!("Invalid server version: {:?}", e)) } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(e: Infallible) -> Self { warn!("Infallible error: {:?}", e); - RedisError::new(RedisErrorKind::Unknown, "Unknown error.") + Error::new(ErrorKind::Unknown, "Unknown error.") } } #[doc(hidden)] -impl From for RedisError { +impl From for Error { fn from(e: Resp2Frame) -> Self { match e { Resp2Frame::SimpleString(s) => match str::from_utf8(&s).ok() { - Some("Canceled") => 
RedisError::new_canceled(), - _ => RedisError::new(RedisErrorKind::Unknown, "Unknown frame error."), + Some("Canceled") => Error::new_canceled(), + _ => Error::new(ErrorKind::Unknown, "Unknown frame error."), }, - _ => RedisError::new(RedisErrorKind::Unknown, "Unknown frame error."), + _ => Error::new(ErrorKind::Unknown, "Unknown frame error."), } } } @@ -296,75 +293,75 @@ impl From for RedisError { #[doc(hidden)] #[cfg(feature = "enable-native-tls")] #[cfg_attr(docsrs, doc(cfg(feature = "enable-native-tls")))] -impl From for RedisError { +impl From for Error { fn from(e: native_tls::Error) -> Self { - RedisError::new(RedisErrorKind::Tls, format!("{:?}", e)) + Error::new(ErrorKind::Tls, format!("{:?}", e)) } } #[doc(hidden)] #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))))] -impl From for RedisError { +impl From for Error { fn from(e: rustls::pki_types::InvalidDnsNameError) -> Self { - RedisError::new(RedisErrorKind::Tls, format!("{:?}", e)) + Error::new(ErrorKind::Tls, format!("{:?}", e)) } } #[doc(hidden)] #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))))] -impl From for RedisError { +impl From for Error { fn from(e: rustls::Error) -> Self { - RedisError::new(RedisErrorKind::Tls, format!("{:?}", e)) + Error::new(ErrorKind::Tls, format!("{:?}", e)) } } #[doc(hidden)] #[cfg(feature = "trust-dns-resolver")] #[cfg_attr(docsrs, doc(cfg(feature = "trust-dns-resolver")))] -impl From for RedisError { +impl From for Error { fn from(e: trust_dns_resolver::error::ResolveError) -> Self { - RedisError::new(RedisErrorKind::IO, format!("{:?}", e)) + Error::new(ErrorKind::IO, format!("{:?}", e)) } } #[doc(hidden)] #[cfg(feature = "dns")] #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] -impl From for RedisError { +impl From for Error { fn from(e: 
hickory_resolver::error::ResolveError) -> Self { - RedisError::new(RedisErrorKind::IO, format!("{:?}", e)) + Error::new(ErrorKind::IO, format!("{:?}", e)) } } #[cfg(feature = "serde-json")] #[cfg_attr(docsrs, doc(cfg(feature = "serde-json")))] -impl From for RedisError { +impl From for Error { fn from(e: serde_json::Error) -> Self { - RedisError::new(RedisErrorKind::Parse, format!("{:?}", e)) + Error::new(ErrorKind::Parse, format!("{:?}", e)) } } -impl RedisError { - /// Create a new Redis error with the provided details. - pub fn new(kind: RedisErrorKind, details: T) -> RedisError +impl Error { + /// Create a new error with the provided details. + pub fn new(kind: ErrorKind, details: T) -> Error where T: Into>, { - RedisError { + Error { kind, details: details.into(), } } /// Read the type of error without any associated data. - pub fn kind(&self) -> &RedisErrorKind { + pub fn kind(&self) -> &ErrorKind { &self.kind } /// Change the kind of the error. - pub fn change_kind(&mut self, kind: RedisErrorKind) { + pub fn change_kind(&mut self, kind: ErrorKind) { self.kind = kind; } @@ -375,7 +372,7 @@ impl RedisError { /// Create a new empty Canceled error. pub fn new_canceled() -> Self { - RedisError::new(RedisErrorKind::Canceled, "Canceled.") + Error::new(ErrorKind::Canceled, "Canceled.") } /// Create a new parse error with the provided details. @@ -383,44 +380,54 @@ impl RedisError { where T: Into>, { - RedisError::new(RedisErrorKind::Parse, details) + Error::new(ErrorKind::Parse, details) } /// Create a new default backpressure error. pub(crate) fn new_backpressure() -> Self { - RedisError::new(RedisErrorKind::Backpressure, "Max in-flight commands reached.") + Error::new(ErrorKind::Backpressure, "Max in-flight commands reached.") } /// Whether reconnection logic should be skipped in all cases. 
pub(crate) fn should_not_reconnect(&self) -> bool { - matches!(self.kind, RedisErrorKind::Config | RedisErrorKind::Url) + matches!(self.kind, ErrorKind::Config | ErrorKind::Url) } /// Whether the error is a `Cluster` error. pub fn is_cluster(&self) -> bool { - matches!(self.kind, RedisErrorKind::Cluster) + matches!(self.kind, ErrorKind::Cluster) } /// Whether the error is a `Canceled` error. pub fn is_canceled(&self) -> bool { - matches!(self.kind, RedisErrorKind::Canceled) + matches!(self.kind, ErrorKind::Canceled) } /// Whether the error is a `Replica` error. #[cfg(feature = "replicas")] #[cfg_attr(docsrs, doc(cfg(feature = "replicas")))] pub fn is_replica(&self) -> bool { - matches!(self.kind, RedisErrorKind::Replica) + matches!(self.kind, ErrorKind::Replica) } /// Whether the error is a `NotFound` error. pub fn is_not_found(&self) -> bool { - matches!(self.kind, RedisErrorKind::NotFound) + matches!(self.kind, ErrorKind::NotFound) + } + + /// Whether the error is a MOVED redirection. + pub fn is_moved(&self) -> bool { + self.is_cluster() && self.details.starts_with("MOVED") + } + + /// Whether the error is an ASK redirection. 
+ pub fn is_ask(&self) -> bool { + self.is_cluster() && self.details.starts_with("ASK") } } -impl Error for RedisError { - fn source(&self) -> Option<&(dyn Error + 'static)> { +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { None } } diff --git a/src/interfaces.rs b/src/interfaces.rs index 16865734..7d6733ae 100644 --- a/src/interfaces.rs +++ b/src/interfaces.rs @@ -1,10 +1,12 @@ +pub(crate) use crate::runtime::spawn_event_listener; +pub use crate::runtime::ClientLike; use crate::{ commands, - error::{RedisError, RedisErrorKind}, - modules::inner::RedisClientInner, - protocol::command::{RedisCommand, RouterCommand}, + error::{Error, ErrorKind}, + modules::inner::ClientInner, + protocol::command::{Command, RouterCommand}, runtime::{sleep, spawn, BroadcastReceiver, JoinHandle, RefCount}, - types::{ClientState, ClusterStateChange, KeyspaceEvent, Message, RespVersion, Server}, + types::{config::Server, ClientState, ClusterStateChange, KeyspaceEvent, Message, RespVersion}, utils, }; use bytes_utils::Str; @@ -13,18 +15,15 @@ use futures::Future; pub use redis_protocol::resp3::types::BytesFrame as Resp3Frame; use std::time::Duration; -pub(crate) use crate::runtime::spawn_event_listener; -pub use crate::runtime::ClientLike; - -/// Type alias for `Result`. -pub type RedisResult = Result; +/// Type alias for `Result`. +pub type FredResult = Result; -/// Send a single `RedisCommand` to the router. -pub(crate) fn default_send_command(inner: &RefCount, command: C) -> Result<(), RedisError> +/// Send a single `Command` to the router. +pub(crate) fn default_send_command(inner: &RefCount, command: C) -> Result<(), Error> where - C: Into, + C: Into, { - let mut command: RedisCommand = command.into(); + let mut command: Command = command.into(); _trace!( inner, "Sending command {} ({}) to router.", @@ -37,33 +36,17 @@ where } /// Send a `RouterCommand` to the router. 
-pub(crate) fn send_to_router(inner: &RefCount, command: RouterCommand) -> Result<(), RedisError> { +pub(crate) fn send_to_router(inner: &RefCount, command: RouterCommand) -> Result<(), Error> { #[allow(clippy::collapsible_if)] if command.should_check_fail_fast() { if utils::read_locked(&inner.state) != ClientState::Connected { _debug!(inner, "Responding early after fail fast check."); - command.finish_with_error(RedisError::new( - RedisErrorKind::Canceled, - "Connection closed unexpectedly.", - )); + command.finish_with_error(Error::new(ErrorKind::Canceled, "Connection closed unexpectedly.")); return Ok(()); } } - let new_len = inner.counters.incr_cmd_buffer_len(); - let should_apply_backpressure = inner.connection.max_command_buffer_len > 0 - && new_len > inner.connection.max_command_buffer_len - && !command.should_skip_backpressure(); - - if should_apply_backpressure { - inner.counters.decr_cmd_buffer_len(); - command.finish_with_error(RedisError::new( - RedisErrorKind::Backpressure, - "Max command queue length exceeded.", - )); - return Ok(()); - } - + inner.counters.incr_cmd_buffer_len(); if let Err(e) = inner.send_command(command) { // usually happens if the caller tries to send a command before calling `connect` or after calling `quit` inner.counters.decr_cmd_buffer_len(); @@ -75,10 +58,7 @@ pub(crate) fn send_to_router(inner: &RefCount, command: Router command.kind.to_str_debug() ); - command.respond_to_caller(Err(RedisError::new( - RedisErrorKind::Unknown, - "Client is not initialized.", - ))); + command.respond_to_caller(Err(Error::new(ErrorKind::Unknown, "Client is not initialized."))); } else { _warn!( inner, @@ -86,10 +66,7 @@ pub(crate) fn send_to_router(inner: &RefCount, command: Router ); } - Err(RedisError::new( - RedisErrorKind::Unknown, - "Failed to send command to router.", - )) + Err(Error::new(ErrorKind::Unknown, "Failed to send command to router.")) } else { Ok(()) } @@ -104,7 +81,7 @@ pub trait HeartbeatInterface: ClientLike { &self, 
interval: Duration, break_on_error: bool, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { let _self = self.clone(); @@ -112,8 +89,8 @@ pub trait HeartbeatInterface: ClientLike { sleep(interval).await; if break_on_error { - let _: () = _self.ping().await?; - } else if let Err(e) = _self.ping::<()>().await { + let _: () = _self.ping(None).await?; + } else if let Err(e) = _self.ping::<()>(None).await { warn!("{}: Heartbeat ping failed with error: {:?}", _self.inner().id, e); } } @@ -126,15 +103,15 @@ pub trait HeartbeatInterface: ClientLike { /// Functions for authenticating clients. #[rm_send_if(feature = "glommio")] pub trait AuthInterface: ClientLike { - /// Request for authentication in a password-protected Redis server. Returns ok if successful. + /// Request for authentication in a password-protected server. Returns ok if successful. /// /// The client will automatically authenticate with the default user if a password is provided in the associated - /// `RedisConfig` when calling [connect](crate::interfaces::ClientLike::connect). + /// `Config` when calling [connect](crate::interfaces::ClientLike::connect). /// /// If running against clustered servers this function will authenticate all connections. /// /// - fn auth(&self, username: Option, password: S) -> impl Future> + Send + fn auth(&self, username: Option, password: S) -> impl Future> + Send where S: Into + Send, { @@ -154,7 +131,7 @@ pub trait AuthInterface: ClientLike { version: RespVersion, auth: Option<(Str, Str)>, setname: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { commands::server::hello(self, version, auth, setname).await } } } @@ -167,9 +144,10 @@ pub trait EventInterface: ClientLike { /// Spawn a task that runs the provided function on each publish-subscribe message. /// /// See [message_rx](Self::message_rx) for more information. 
- fn on_message(&self, func: F) -> JoinHandle> + fn on_message(&self, func: F) -> JoinHandle> where - F: Fn(Message) -> RedisResult<()> + Send + 'static, + Fut: Future> + Send + 'static, + F: Fn(Message) -> Fut + Send + 'static, { let rx = self.message_rx(); spawn_event_listener(rx, func) @@ -178,9 +156,10 @@ pub trait EventInterface: ClientLike { /// Spawn a task that runs the provided function on each keyspace event. /// /// - fn on_keyspace_event(&self, func: F) -> JoinHandle> + fn on_keyspace_event(&self, func: F) -> JoinHandle> where - F: Fn(KeyspaceEvent) -> RedisResult<()> + Send + 'static, + Fut: Future> + Send + 'static, + F: Fn(KeyspaceEvent) -> Fut + Send + 'static, { let rx = self.keyspace_event_rx(); spawn_event_listener(rx, func) @@ -189,9 +168,10 @@ pub trait EventInterface: ClientLike { /// Spawn a task that runs the provided function on each reconnection event. /// /// Errors returned by `func` will exit the task. - fn on_reconnect(&self, func: F) -> JoinHandle> + fn on_reconnect(&self, func: F) -> JoinHandle> where - F: Fn(Server) -> RedisResult<()> + Send + 'static, + Fut: Future> + Send + 'static, + F: Fn(Server) -> Fut + Send + 'static, { let rx = self.reconnect_rx(); spawn_event_listener(rx, func) @@ -200,9 +180,10 @@ pub trait EventInterface: ClientLike { /// Spawn a task that runs the provided function on each cluster change event. /// /// Errors returned by `func` will exit the task. - fn on_cluster_change(&self, func: F) -> JoinHandle> + fn on_cluster_change(&self, func: F) -> JoinHandle> where - F: Fn(Vec) -> RedisResult<()> + Send + 'static, + Fut: Future> + Send + 'static, + F: Fn(Vec) -> Fut + Send + 'static, { let rx = self.cluster_change_rx(); spawn_event_listener(rx, func) @@ -211,18 +192,20 @@ pub trait EventInterface: ClientLike { /// Spawn a task that runs the provided function on each connection error event. /// /// Errors returned by `func` will exit the task. 
- fn on_error(&self, func: F) -> JoinHandle> + fn on_error(&self, func: F) -> JoinHandle> where - F: Fn(RedisError) -> RedisResult<()> + Send + 'static, + Fut: Future> + Send + 'static, + F: Fn((Error, Option)) -> Fut + Send + 'static, { let rx = self.error_rx(); spawn_event_listener(rx, func) } /// Spawn a task that runs the provided function whenever the client detects an unresponsive connection. - fn on_unresponsive(&self, func: F) -> JoinHandle> + fn on_unresponsive(&self, func: F) -> JoinHandle> where - F: Fn(Server) -> RedisResult<()> + Send + 'static, + Fut: Future> + Send + 'static, + F: Fn(Server) -> Fut + Send + 'static, { let rx = self.unresponsive_rx(); spawn_event_listener(rx, func) @@ -231,11 +214,19 @@ pub trait EventInterface: ClientLike { /// Spawn one task that listens for all connection management event types. /// /// Errors in any of the provided functions will exit the task. - fn on_any(&self, error_fn: Fe, reconnect_fn: Fr, cluster_change_fn: Fc) -> JoinHandle> + fn on_any( + &self, + error_fn: Fe, + reconnect_fn: Fr, + cluster_change_fn: Fc, + ) -> JoinHandle> where - Fe: Fn(RedisError) -> RedisResult<()> + Send + 'static, - Fr: Fn(Server) -> RedisResult<()> + Send + 'static, - Fc: Fn(Vec) -> RedisResult<()> + Send + 'static, + Fut1: Future> + Send + 'static, + Fut2: Future> + Send + 'static, + Fut3: Future> + Send + 'static, + Fe: Fn((Error, Option)) -> Fut1 + Send + 'static, + Fr: Fn(Server) -> Fut2 + Send + 'static, + Fc: Fn(Vec) -> Fut3 + Send + 'static, { let mut error_rx = self.error_rx(); let mut reconnect_rx = self.reconnect_rx(); @@ -247,20 +238,20 @@ pub trait EventInterface: ClientLike { loop { tokio::select! 
{ - Ok(error) = error_rx.recv() => { - if let Err(err) = error_fn(error) { + Ok((error, server)) = error_rx.recv() => { + if let Err(err) = error_fn((error, server)).await { result = Err(err); break; } } Ok(server) = reconnect_rx.recv() => { - if let Err(err) = reconnect_fn(server) { + if let Err(err) = reconnect_fn(server).await { result = Err(err); break; } } Ok(changes) = cluster_rx.recv() => { - if let Err(err) = cluster_change_fn(changes) { + if let Err(err) = cluster_change_fn(changes).await { result = Err(err); break; } @@ -276,7 +267,7 @@ pub trait EventInterface: ClientLike { /// /// **Keyspace events are not sent on this interface.** /// - /// If the connection to the Redis server closes for any reason this function does not need to be called again. + /// If the connection to the server closes for any reason this function does not need to be called again. /// Messages will start appearing on the original stream after /// [subscribe](crate::interfaces::PubsubInterface::subscribe) is called again. fn message_rx(&self) -> BroadcastReceiver { @@ -313,7 +304,7 @@ pub trait EventInterface: ClientLike { /// Listen for protocol and connection errors. This stream can be used to more intelligently handle errors that may /// not appear in the request-response cycle, and so cannot be handled by response futures. - fn error_rx(&self) -> BroadcastReceiver { + fn error_rx(&self) -> BroadcastReceiver<(Error, Option)> { self.inner().notifications.errors.load().subscribe() } diff --git a/src/lib.rs b/src/lib.rs index 944f232d..f573e9b9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -76,8 +76,8 @@ mod runtime; /// Various client utility functions. 
pub mod util { - pub use crate::utils::{f64_to_redis_string, redis_string_to_f64, static_bytes, static_str}; - use crate::{error::RedisError, types::RedisKey}; + pub use crate::utils::{f64_to_string, static_bytes, static_str, string_to_f64}; + use crate::{error::Error, types::Key}; pub use redis_protocol::redis_keyslot; use std::collections::{BTreeMap, VecDeque}; @@ -107,7 +107,7 @@ pub mod util { /// /// ```rust /// # use fred::prelude::*; - /// async fn example(client: impl KeysInterface) -> Result<(), RedisError> { + /// async fn example(client: impl KeysInterface) -> Result<(), Error> { /// let keys = vec!["foo", "bar", "baz", "a{1}", "b{1}", "c{1}"]; /// let groups = fred::util::group_by_hash_slot(keys)?; /// @@ -118,17 +118,15 @@ pub mod util { /// Ok(()) /// } /// ``` - pub fn group_by_hash_slot( - args: impl IntoIterator, - ) -> Result>, RedisError> + pub fn group_by_hash_slot(args: impl IntoIterator) -> Result>, Error> where - T: TryInto, - T::Error: Into, + T: TryInto, + T::Error: Into, { let mut out = BTreeMap::new(); for arg in args.into_iter() { - let arg: RedisKey = to!(arg)?; + let arg: Key = to!(arg)?; let slot = redis_keyslot(arg.as_bytes()); out.entry(slot).or_insert(VecDeque::new()).push_back(arg); @@ -142,29 +140,33 @@ pub mod util { pub mod prelude { #[cfg(feature = "partial-tracing")] #[cfg_attr(docsrs, doc(cfg(feature = "partial-tracing")))] - pub use crate::types::TracingConfig; + pub use crate::types::config::TracingConfig; pub use crate::{ - clients::{RedisClient, RedisPool}, - error::{RedisError, RedisErrorKind}, + clients::{Client, Pool}, + error::{Error, ErrorKind}, interfaces::*, types::{ - Blocking, + config::{ + Blocking, + Config, + ConnectionConfig, + Options, + PerformanceConfig, + ReconnectPolicy, + Server, + ServerConfig, + TcpConfig, + }, Builder, - ConnectionConfig, + ClientState, Expiration, - FromRedis, - Options, - PerformanceConfig, - ReconnectPolicy, - RedisConfig, - RedisKey, - RedisValue, - RedisValueKind, - Server, - 
ServerConfig, + FromKey, + FromValue, + Key, SetOptions, - TcpConfig, + Value, + ValueKind, }, }; @@ -181,5 +183,5 @@ pub mod prelude { feature = "enable-rustls-ring" ))) )] - pub use crate::types::{TlsConfig, TlsConnector}; + pub use crate::types::config::{TlsConfig, TlsConnector}; } diff --git a/src/macros.rs b/src/macros.rs index 26042c2b..da70ebaf 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -126,7 +126,7 @@ macro_rules! cmd( macro_rules! static_val( ($val:expr) => { - RedisValue::from_static_str($val) + Value::from_static_str($val) } ); diff --git a/src/modules/backchannel.rs b/src/modules/backchannel.rs index 962aef19..2126eb6e 100644 --- a/src/modules/backchannel.rs +++ b/src/modules/backchannel.rs @@ -1,133 +1,163 @@ use crate::{ - error::{RedisError, RedisErrorKind}, - modules::inner::RedisClientInner, - protocol::{command::RedisCommand, connection, connection::RedisTransport, types::Server}, - router::Connections, - runtime::RefCount, + error::{Error, ErrorKind}, + modules::inner::ClientInner, + protocol::{command::Command, connection, connection::ExclusiveConnection, types::Server}, + router::connections::Connections, + runtime::{AsyncRwLock, RefCount}, utils, }; +use parking_lot::Mutex; use redis_protocol::resp3::types::BytesFrame as Resp3Frame; -use std::collections::HashMap; +use std::{ + collections::HashMap, + ops::{Deref, DerefMut}, +}; /// Check if an existing connection can be used to the provided `server`, otherwise create a new one. /// /// Returns whether a new connection was created. 
async fn check_and_create_transport( - backchannel: &mut Backchannel, - inner: &RefCount, + backchannel: &Backchannel, + inner: &RefCount, server: &Server, -) -> Result { - if let Some(ref mut transport) = backchannel.transport { +) -> Result { + let mut transport = backchannel.transport.write().await; + + if let Some(ref mut transport) = transport.deref_mut() { if &transport.server == server && transport.ping(inner).await.is_ok() { _debug!(inner, "Using existing backchannel connection to {}", server); return Ok(false); } } - backchannel.transport = None; + *transport.deref_mut() = None; - let mut transport = connection::create(inner, server, None).await?; - transport.setup(inner, None).await?; - backchannel.transport = Some(transport); + let mut _transport = connection::create(inner, server, None).await?; + _transport.setup(inner, None).await?; + *transport.deref_mut() = Some(_transport); Ok(true) } /// A struct wrapping a separate connection to the server or cluster for client or cluster management commands. -#[derive(Default)] pub struct Backchannel { /// A connection to any of the servers. - pub transport: Option, + pub transport: AsyncRwLock>, /// An identifier for the blocked connection, if any. - pub blocked: Option, + pub blocked: Mutex>, /// A map of server IDs to connection IDs, as managed by the router. - pub connection_ids: HashMap, + pub connection_ids: Mutex>, +} + +impl Default for Backchannel { + fn default() -> Self { + Backchannel { + transport: AsyncRwLock::new(None), + blocked: Mutex::new(None), + connection_ids: Mutex::new(HashMap::new()), + } + } } impl Backchannel { /// Check if the current server matches the provided server, and disconnect. // TODO does this need to disconnect whenever the caller manually changes the RESP protocol mode? 
- pub async fn check_and_disconnect(&mut self, inner: &RefCount, server: Option<&Server>) { + pub async fn check_and_disconnect(&self, inner: &RefCount, server: Option<&Server>) { let should_close = self .current_server() + .await .map(|current| server.map(|server| *server == current).unwrap_or(true)) .unwrap_or(false); if should_close { - if let Some(ref mut transport) = self.transport { + if let Some(ref mut transport) = self.transport.write().await.take() { let _ = transport.disconnect(inner).await; } - self.transport = None; + } + } + + /// Check if the provided server is marked as blocked, and if so remove it from the cache. + pub fn check_and_unblock(&self, server: &Server) { + let mut guard = self.blocked.lock(); + let matches = if let Some(blocked) = guard.as_ref() { + blocked == server + } else { + false + }; + + if matches { + *guard = None; } } /// Clear all local state that depends on the associated `Router` instance. - pub async fn clear_router_state(&mut self, inner: &RefCount) { - self.connection_ids.clear(); - self.blocked = None; + pub async fn clear_router_state(&self, inner: &RefCount) { + self.connection_ids.lock().clear(); + self.blocked.lock().take(); - if let Some(ref mut transport) = self.transport { + if let Some(ref mut transport) = self.transport.write().await.take() { let _ = transport.disconnect(inner).await; } - self.transport = None; } /// Set the connection IDs from the router. - pub fn update_connection_ids(&mut self, connections: &Connections) { - self.connection_ids = connections.connection_ids(); + pub fn update_connection_ids(&self, connections: &Connections) { + let mut guard = self.connection_ids.lock(); + *guard.deref_mut() = connections.connection_ids(); } /// Remove the provided server from the connection ID map. 
- pub fn remove_connection_id(&mut self, server: &Server) { - self.connection_ids.get(server); + pub fn remove_connection_id(&self, server: &Server) { + self.connection_ids.lock().get(server); } /// Read the connection ID for the provided server. pub fn connection_id(&self, server: &Server) -> Option { - self.connection_ids.get(server).cloned() + self.connection_ids.lock().get(server).cloned() } /// Set the blocked flag to the provided server. - pub fn set_blocked(&mut self, server: &Server) { - self.blocked = Some(server.clone()); + pub fn set_blocked(&self, server: &Server) { + self.blocked.lock().replace(server.clone()); } /// Remove the blocked flag. - pub fn set_unblocked(&mut self) { - self.blocked = None; + pub fn set_unblocked(&self) { + self.blocked.lock().take(); } /// Remove the blocked flag only if the server matches the blocked server. - pub fn check_and_set_unblocked(&mut self, server: &Server) { - let should_remove = self.blocked.as_ref().map(|blocked| blocked == server).unwrap_or(false); - if should_remove { - self.set_unblocked(); + pub fn check_and_set_unblocked(&self, server: &Server) { + let mut guard = self.blocked.lock(); + if guard.as_ref().map(|b| b == server).unwrap_or(false) { + guard.take(); } } /// Whether the client is blocked on a command. pub fn is_blocked(&self) -> bool { - self.blocked.is_some() + self.blocked.lock().is_some() } /// Whether an open connection exists to the blocked server. - pub fn has_blocked_transport(&self) -> bool { - match self.blocked { - Some(ref server) => match self.transport { - Some(ref transport) => &transport.server == server, + pub async fn has_blocked_transport(&self) -> bool { + if let Some(server) = self.blocked_server() { + match self.transport.read().await.deref() { + Some(ref transport) => transport.server == server, None => false, - }, - None => false, + } + } else { + false } } /// Return the server ID of the blocked client connection, if found. 
pub fn blocked_server(&self) -> Option { - self.blocked.clone() + self.blocked.lock().clone() } /// Return the server ID of the existing backchannel connection, if found. - pub fn current_server(&self) -> Option { - self.transport.as_ref().map(|t| t.server.clone()) + pub async fn current_server(&self) -> Option { + self.transport.read().await.as_ref().map(|t| t.server.clone()) } /// Return a server ID, with the following preferences: @@ -135,17 +165,19 @@ impl Backchannel { /// 1. The server ID of the existing connection, if any. /// 2. The blocked server ID, if any. /// 3. A random server ID from the router's connection map. - pub fn any_server(&self) -> Option { + pub async fn any_server(&self) -> Option { self .current_server() + .await .or(self.blocked_server()) - .or(self.connection_ids.keys().next().cloned()) + .or_else(|| self.connection_ids.lock().keys().next().cloned()) } /// Whether the existing connection is to the currently blocked server. - pub fn current_server_is_blocked(&self) -> bool { + pub async fn current_server_is_blocked(&self) -> bool { self .current_server() + .await .and_then(|server| self.blocked_server().map(|blocked| server == blocked)) .unwrap_or(false) } @@ -154,14 +186,14 @@ impl Backchannel { /// /// If a new connection is created this function also sets it on `self` before returning. 
pub async fn request_response( - &mut self, - inner: &RefCount, + &self, + inner: &RefCount, server: &Server, - command: RedisCommand, - ) -> Result { + command: Command, + ) -> Result { let _ = check_and_create_transport(self, inner, server).await?; - if let Some(ref mut transport) = self.transport { + if let Some(ref mut transport) = self.transport.write().await.deref_mut() { _debug!( inner, "Sending {} ({}) on backchannel to {}", @@ -176,8 +208,8 @@ impl Backchannel { ) .await } else { - Err(RedisError::new( - RedisErrorKind::Unknown, + Err(Error::new( + ErrorKind::Unknown, "Failed to create backchannel connection.", )) } @@ -192,45 +224,42 @@ impl Backchannel { /// will be used. /// * If a backchannel connection already exists then that will be used. /// * Failing all of the above a random server will be used. - pub fn find_server( + pub async fn find_server( &self, - inner: &RefCount, - command: &RedisCommand, + inner: &RefCount, + command: &Command, use_blocked: bool, - ) -> Result { + ) -> Result { if use_blocked { - if let Some(server) = self.blocked.as_ref() { + if let Some(server) = self.blocked.lock().deref() { Ok(server.clone()) } else { // should this be more relaxed? 
- Err(RedisError::new(RedisErrorKind::Unknown, "No connections are blocked.")) + Err(Error::new(ErrorKind::Unknown, "No connections are blocked.")) } } else if inner.config.server.is_clustered() { if command.kind.use_random_cluster_node() { self .any_server() - .ok_or_else(|| RedisError::new(RedisErrorKind::Unknown, "Failed to find backchannel server.")) + .await + .ok_or_else(|| Error::new(ErrorKind::Unknown, "Failed to find backchannel server.")) } else { inner.with_cluster_state(|state| { let slot = match command.cluster_hash() { Some(slot) => slot, - None => { - return Err(RedisError::new( - RedisErrorKind::Cluster, - "Failed to find cluster hash slot.", - )) - }, + None => return Err(Error::new(ErrorKind::Cluster, "Failed to find cluster hash slot.")), }; state .get_server(slot) .cloned() - .ok_or_else(|| RedisError::new(RedisErrorKind::Cluster, "Failed to find cluster owner.")) + .ok_or_else(|| Error::new(ErrorKind::Cluster, "Failed to find cluster owner.")) }) } } else { self .any_server() - .ok_or_else(|| RedisError::new(RedisErrorKind::Unknown, "Failed to find backchannel server.")) + .await + .ok_or_else(|| Error::new(ErrorKind::Unknown, "Failed to find backchannel server.")) } } } diff --git a/src/modules/inner.rs b/src/modules/inner.rs index d03ec299..1469645a 100644 --- a/src/modules/inner.rs +++ b/src/modules/inner.rs @@ -1,29 +1,36 @@ use crate::{ error::*, - interfaces, modules::backchannel::Backchannel, protocol::{ - command::{ResponseSender, RouterCommand}, - connection::RedisTransport, + command::RouterCommand, + connection::ExclusiveConnection, types::{ClusterRouting, DefaultResolver, Resolve, Server}, }, runtime::{ broadcast_channel, broadcast_send, + channel, sleep, - unbounded_channel, AsyncRwLock, AtomicBool, AtomicUsize, BroadcastSender, Mutex, + Receiver, RefCount, RefSwap, RwLock, - UnboundedReceiver, - UnboundedSender, + Sender, + }, + trace, + types::{ + config::{ClusterDiscoveryPolicy, Config, ConnectionConfig, PerformanceConfig, 
ReconnectPolicy, ServerConfig}, + ClientState, + ClusterStateChange, + KeyspaceEvent, + Message, + RespVersion, }, - types::*, utils, }; use bytes_utils::Str; @@ -35,26 +42,25 @@ use std::{ops::DerefMut, time::Duration}; use crate::modules::metrics::MovingStats; #[cfg(feature = "credential-provider")] use crate::{ - clients::RedisClient, - interfaces::RedisResult, + clients::Client, + interfaces::FredResult, interfaces::{AuthInterface, ClientLike}, runtime::{spawn, JoinHandle}, }; #[cfg(feature = "replicas")] use std::collections::HashMap; -use std::collections::HashSet; -pub type CommandSender = UnboundedSender; -pub type CommandReceiver = UnboundedReceiver; +pub type CommandSender = Sender; +pub type CommandReceiver = Receiver; #[cfg(feature = "i-tracking")] -use crate::types::Invalidation; +use crate::types::client::Invalidation; pub struct Notifications { /// The client ID. pub id: Str, /// A broadcast channel for the `on_error` interface. - pub errors: RefSwap>>, + pub errors: RefSwap)>>>, /// A broadcast channel for the `on_message` interface. pub pubsub: RefSwap>>, /// A broadcast channel for the `on_keyspace_event` interface. @@ -64,7 +70,7 @@ pub struct Notifications { /// A broadcast channel for the `on_cluster_change` interface. pub cluster_change: RefSwap>>>, /// A broadcast channel for the `on_connect` interface. - pub connect: RefSwap>>>, + pub connect: RefSwap>>>, /// A channel for events that should close all client tasks with `Canceled` errors. /// /// Emitted when QUIT, SHUTDOWN, etc are called. @@ -106,8 +112,8 @@ impl Notifications { utils::swap_new_broadcast_channel(&self.unresponsive, capacity); } - pub fn broadcast_error(&self, error: RedisError) { - broadcast_send(self.errors.load().as_ref(), &error, |err| { + pub fn broadcast_error(&self, error: Error, server: Option) { + broadcast_send(self.errors.load().as_ref(), &(error, server), |(err, _)| { debug!("{}: No `on_error` listener. 
The error was: {err:?}", self.id); }); } @@ -136,14 +142,14 @@ impl Notifications { }); } - pub fn broadcast_connect(&self, result: Result<(), RedisError>) { + pub fn broadcast_connect(&self, result: Result<(), Error>) { broadcast_send(self.connect.load().as_ref(), &result, |_| { debug!("{}: No `on_connect` listeners.", self.id); }); } /// Interrupt any tokio `sleep` calls. - //`RedisClientInner::wait_with_interrupt` hides the subscription part from callers. + //`ClientInner::wait_with_interrupt` hides the subscription part from callers. pub fn broadcast_close(&self) { broadcast_send(&self.close, &(), |_| { debug!("{}: No `close` listeners.", self.id); @@ -216,17 +222,15 @@ impl ClientCounters { /// Cached state related to the server(s). pub struct ServerState { - pub kind: ServerKind, - pub connections: HashSet, + pub kind: ServerKind, #[cfg(feature = "replicas")] - pub replicas: HashMap, + pub replicas: HashMap, } impl ServerState { - pub fn new(config: &RedisConfig) -> Self { + pub fn new(config: &Config) -> Self { ServerState { kind: ServerKind::new(config), - connections: HashSet::new(), #[cfg(feature = "replicas")] replicas: HashMap::new(), } @@ -259,7 +263,7 @@ pub enum ServerKind { impl ServerKind { /// Create a new, empty server state cache. - pub fn new(config: &RedisConfig) -> Self { + pub fn new(config: &Config) -> Self { match config.server { ServerConfig::Clustered { .. } => ServerKind::Cluster { version: None, @@ -315,24 +319,18 @@ impl ServerKind { } } - pub fn with_cluster_state(&self, func: F) -> Result + pub fn with_cluster_state(&self, func: F) -> Result where - F: FnOnce(&ClusterRouting) -> Result, + F: FnOnce(&ClusterRouting) -> Result, { if let ServerKind::Cluster { ref cache, .. 
} = *self { if let Some(state) = cache.as_ref() { func(state) } else { - Err(RedisError::new( - RedisErrorKind::Cluster, - "Missing cluster routing state.", - )) + Err(Error::new(ErrorKind::Cluster, "Missing cluster routing state.")) } } else { - Err(RedisError::new( - RedisErrorKind::Cluster, - "Missing cluster routing state.", - )) + Err(Error::new(ErrorKind::Cluster, "Missing cluster routing state.")) } } @@ -378,13 +376,12 @@ impl ServerKind { } } -// TODO make a config option for other defaults and extend this fn create_resolver(id: &Str) -> RefCount { RefCount::new(DefaultResolver::new(id)) } #[cfg(feature = "credential-provider")] -fn spawn_credential_refresh(client: RedisClient, interval: Duration) -> JoinHandle> { +fn spawn_credential_refresh(client: Client, interval: Duration) -> JoinHandle> { spawn(async move { loop { trace!( @@ -428,7 +425,7 @@ fn spawn_credential_refresh(client: RedisClient, interval: Duration) -> JoinHand }) } -pub struct RedisClientInner { +pub struct ClientInner { /// An internal lock used to sync certain select operations that should not run concurrently across tasks. pub _lock: Mutex<()>, /// The client ID used for logging and the default `CLIENT SETNAME` value. @@ -438,7 +435,7 @@ pub struct RedisClientInner { /// The state of the underlying connection. pub state: RwLock, /// Client configuration options. - pub config: RefCount, + pub config: RefCount, /// Connection configuration options. pub connection: RefCount, /// Performance config options for the client. @@ -452,7 +449,7 @@ pub struct RedisClientInner { /// The DNS resolver to use when establishing new connections. pub resolver: AsyncRwLock>, /// A backchannel that can be used to control the router connections even while the connections are blocked. - pub backchannel: RefCount>, + pub backchannel: RefCount, /// Server state cache for various deployment types. 
pub server_state: RwLock, @@ -463,7 +460,7 @@ pub struct RedisClientInner { /// A handle to a task that refreshes credentials on an interval. #[cfg(feature = "credential-provider")] - pub credentials_task: RwLock>>>, + pub credentials_task: RwLock>>>, /// Command latency metrics. #[cfg(feature = "metrics")] pub latency_stats: RwLock, @@ -479,28 +476,28 @@ pub struct RedisClientInner { } #[cfg(feature = "credential-provider")] -impl Drop for RedisClientInner { +impl Drop for ClientInner { fn drop(&mut self) { self.abort_credential_refresh_task(); } } -impl RedisClientInner { +impl ClientInner { pub fn new( - config: RedisConfig, + config: Config, perf: PerformanceConfig, connection: ConnectionConfig, policy: Option, - ) -> RefCount { + ) -> RefCount { let id = Str::from(format!("fred-{}", utils::random_string(10))); let resolver = AsyncRwLock::new(create_resolver(&id)); - let (command_tx, command_rx) = unbounded_channel(); + let (command_tx, command_rx) = channel(connection.max_command_buffer_len); let notifications = RefCount::new(Notifications::new(&id, perf.broadcast_channel_capacity)); let (config, policy) = (RefCount::new(config), RwLock::new(policy)); let performance = RefSwap::new(RefCount::new(perf)); let (counters, state) = (ClientCounters::default(), RwLock::new(ClientState::Disconnected)); let command_rx = RwLock::new(Some(command_rx)); - let backchannel = RefCount::new(AsyncRwLock::new(Backchannel::default())); + let backchannel = RefCount::new(Backchannel::default()); let server_state = RwLock::new(ServerState::new(&config)); let resp3 = if config.version == RespVersion::RESP3 { RefCount::new(AtomicBool::new(true)) @@ -508,11 +505,9 @@ impl RedisClientInner { RefCount::new(AtomicBool::new(false)) }; let connection = RefCount::new(connection); - #[cfg(feature = "glommio")] - let command_tx = command_tx.into(); let command_tx = RefSwap::new(RefCount::new(command_tx)); - RefCount::new(RedisClientInner { + RefCount::new(ClientInner { _lock: Mutex::new(()), 
#[cfg(feature = "metrics")] latency_stats: RwLock::new(MovingStats::default()), @@ -542,20 +537,8 @@ impl RedisClientInner { }) } - pub fn add_connection(&self, server: &Server) { - self.server_state.write().connections.insert(server.clone()); - } - - pub fn remove_connection(&self, server: &Server) { - self.server_state.write().connections.remove(server); - } - pub fn active_connections(&self) -> Vec { - self.server_state.read().connections.iter().cloned().collect() - } - - pub fn is_pipelined(&self) -> bool { - self.performance.load().as_ref().auto_pipeline + self.backchannel.connection_ids.lock().keys().cloned().collect() } #[cfg(feature = "replicas")] @@ -584,6 +567,10 @@ impl RedisClientInner { self.server_state.write().replicas.clear() } + pub fn has_unresponsive_duration(&self) -> bool { + self.connection.unresponsive.max_timeout.is_some() + } + pub fn shared_resp3(&self) -> RefCount { self.resp3.clone() } @@ -621,9 +608,9 @@ impl RedisClientInner { self.server_state.read().kind.num_cluster_nodes() } - pub fn with_cluster_state(&self, func: F) -> Result + pub fn with_cluster_state(&self, func: F) -> Result where - F: FnOnce(&ClusterRouting) -> Result, + F: FnOnce(&ClusterRouting) -> Result, { self.server_state.read().kind.with_cluster_state(func) } @@ -724,7 +711,7 @@ impl RedisClientInner { } pub async fn set_blocked_server(&self, server: &Server) { - self.backchannel.write().await.set_blocked(server); + self.backchannel.blocked.lock().replace(server.clone()); } pub fn should_reconnect(&self) -> bool { @@ -734,9 +721,6 @@ impl RedisClientInner { .as_ref() .map(|policy| policy.should_reconnect()) .unwrap_or(false); - - // do not attempt a reconnection if the client is intentionally disconnecting. the QUIT and SHUTDOWN commands set - // this flag. 
let is_disconnecting = utils::read_locked(&self.state) == ClientState::Disconnecting; debug!( @@ -746,59 +730,40 @@ impl RedisClientInner { has_policy && !is_disconnecting } - pub fn send_reconnect( - self: &RefCount, - server: Option, - force: bool, - tx: Option, - ) { - debug!("{}: Sending reconnect message to router for {:?}", self.id, server); - - let cmd = RouterCommand::Reconnect { - server, - force, - tx, - #[cfg(feature = "replicas")] - replica: false, - }; - if let Err(_) = interfaces::send_to_router(self, cmd) { - warn!("{}: Error sending reconnect command to router.", self.id); + pub fn reset_reconnection_attempts(&self) { + if let Some(policy) = self.policy.write().deref_mut() { + policy.reset_attempts(); } } - #[cfg(feature = "replicas")] - pub fn send_replica_reconnect(self: &RefCount, server: &Server) { - debug!( - "{}: Sending replica reconnect message to router for {:?}", - self.id, server - ); + pub fn should_cluster_sync(&self, error: &Error) -> bool { + self.config.server.is_clustered() && error.is_cluster() + } - let cmd = RouterCommand::Reconnect { - server: Some(server.clone()), - force: false, - tx: None, - replica: true, - }; - if let Err(_) = interfaces::send_to_router(self, cmd) { - warn!("{}: Error sending reconnect command to router.", self.id); - } + pub async fn update_backchannel(&self, transport: ExclusiveConnection) { + self.backchannel.transport.write().await.replace(transport); } - pub fn reset_reconnection_attempts(&self) { - if let Some(policy) = self.policy.write().deref_mut() { - policy.reset_attempts(); - } + pub fn client_state(&self) -> ClientState { + self.state.read().clone() } - pub fn should_cluster_sync(&self, error: &RedisError) -> bool { - self.config.server.is_clustered() && error.is_cluster() + pub fn set_client_state(&self, client_state: ClientState) { + *self.state.write() = client_state; } - pub async fn update_backchannel(&self, transport: RedisTransport) { - self.backchannel.write().await.transport = 
Some(transport); + pub fn cas_client_state(&self, expected: ClientState, new_state: ClientState) -> bool { + let mut state_guard = self.state.write(); + + if *state_guard != expected { + false + } else { + *state_guard = new_state; + true + } } - pub async fn wait_with_interrupt(&self, duration: Duration) -> Result<(), RedisError> { + pub async fn wait_with_interrupt(&self, duration: Duration) -> Result<(), Error> { #[allow(unused_mut)] let mut rx = self.notifications.close.subscribe(); debug!("{}: Sleeping for {} ms", self.id, duration.as_millis()); @@ -807,33 +772,84 @@ impl RedisClientInner { tokio::pin!(recv_ft); if let Either::Right((_, _)) = select(sleep_ft, recv_ft).await { - Err(RedisError::new(RedisErrorKind::Canceled, "Connection(s) closed.")) + Err(Error::new(ErrorKind::Canceled, "Connection(s) closed.")) } else { Ok(()) } } #[cfg(not(feature = "glommio"))] - pub fn send_command(&self, command: RouterCommand) -> Result<(), RouterCommand> { - self.command_tx.load().send(command).map_err(|e| e.0) + pub fn send_command(self: &RefCount, command: RouterCommand) -> Result<(), RouterCommand> { + use tokio::sync::mpsc::error::TrySendError; + + if let Err(v) = self.command_tx.load().try_send(command) { + match v { + TrySendError::Closed(c) => Err(c), + TrySendError::Full(c) => match c { + RouterCommand::Command(mut cmd) => { + trace::backpressure_event(&cmd, None); + cmd.respond_to_caller(Err(Error::new_backpressure())); + Ok(()) + }, + RouterCommand::Pipeline { mut commands, .. } => { + if let Some(mut cmd) = commands.pop() { + cmd.respond_to_caller(Err(Error::new_backpressure())); + } + Ok(()) + }, + #[cfg(feature = "transactions")] + RouterCommand::Transaction { tx, .. 
} => { + let _ = tx.send(Err(Error::new_backpressure())); + Ok(()) + }, + _ => Err(c), + }, + } + } else { + Ok(()) + } } #[cfg(feature = "glommio")] - pub fn send_command(&self, command: RouterCommand) -> Result<(), RouterCommand> { - self.command_tx.load().try_send(command).map_err(|e| match e { - glommio::GlommioError::Closed(glommio::ResourceType::Channel(v)) => v, - glommio::GlommioError::WouldBlock(glommio::ResourceType::Channel(v)) => v, - _ => unreachable!(), - }) + pub fn send_command(self: &RefCount, command: RouterCommand) -> Result<(), RouterCommand> { + use glommio::{GlommioError, ResourceType}; + + if let Err(e) = self.command_tx.load().try_send(command) { + match e { + GlommioError::Closed(ResourceType::Channel(v)) => Err(v), + GlommioError::WouldBlock(ResourceType::Channel(v)) => match v { + RouterCommand::Command(mut cmd) => { + trace::backpressure_event(&cmd, None); + cmd.respond_to_caller(Err(Error::new_backpressure())); + Ok(()) + }, + RouterCommand::Pipeline { mut commands, .. } => { + if let Some(mut cmd) = commands.pop() { + cmd.respond_to_caller(Err(Error::new_backpressure())); + } + Ok(()) + }, + #[cfg(feature = "transactions")] + RouterCommand::Transaction { tx, .. } => { + let _ = tx.send(Err(Error::new_backpressure())); + Ok(()) + }, + _ => Err(v), + }, + _ => unreachable!(), + } + } else { + Ok(()) + } } #[cfg(not(feature = "credential-provider"))] - pub async fn read_credentials(&self, _: &Server) -> Result<(Option, Option), RedisError> { + pub async fn read_credentials(&self, _: &Server) -> Result<(Option, Option), Error> { Ok((self.config.username.clone(), self.config.password.clone())) } #[cfg(feature = "credential-provider")] - pub async fn read_credentials(&self, server: &Server) -> Result<(Option, Option), RedisError> { + pub async fn read_credentials(&self, server: &Server) -> Result<(Option, Option), Error> { Ok(if let Some(ref provider) = self.config.credential_provider { provider.fetch(Some(server)).await? 
} else { diff --git a/src/modules/mocks.rs b/src/modules/mocks.rs index 057cc963..d0ab830f 100644 --- a/src/modules/mocks.rs +++ b/src/modules/mocks.rs @@ -1,4 +1,4 @@ -//! An interface for mocking Redis commands. +//! An interface for mocking commands. //! //! There are several patterns for utilizing a mocking layer in tests. In some cases a simple "echo" interface is //! enough, or in others callers may need to buffer a series of commands before performing any assertions, etc. More @@ -13,9 +13,9 @@ //! The base `Mocks` trait is directly exposed so callers can implement their own mocking layer as well. use crate::{ - error::{RedisError, RedisErrorKind}, + error::{Error, ErrorKind}, runtime::Mutex, - types::{RedisKey, RedisValue}, + types::{Key, Value}, }; use bytes_utils::Str; use fred_macros::rm_send_if; @@ -24,7 +24,7 @@ use std::{ fmt::Debug, }; -/// A wrapper type for the parts of an internal Redis command. +/// A wrapper type for the parts of an internal command. #[derive(Clone, Debug, Eq, PartialEq)] pub struct MockCommand { /// The first word in the command string. For example: @@ -38,14 +38,14 @@ pub struct MockCommand { /// * `INCRBY` - `None` pub subcommand: Option, /// The ordered list of arguments to the command. - pub args: Vec, + pub args: Vec, } -/// An interface for intercepting and processing Redis commands in a mocking layer. +/// An interface for intercepting and processing commands in a mocking layer. #[allow(unused_variables)] #[rm_send_if(feature = "glommio")] pub trait Mocks: Debug + Send + Sync + 'static { - /// Intercept and process a Redis command, returning any `RedisValue`. + /// Intercept and process a command, returning any `Value`. /// /// # Important /// @@ -53,20 +53,20 @@ pub trait Mocks: Debug + Send + Sync + 'static { /// The parsing logic following each command on the public interface will still be applied. 
__Most__ commands /// perform minimal parsing on the response, but some may require specific response formats to function correctly. /// - /// `RedisValue::Queued` can be used to return a value that will work almost anywhere. - fn process_command(&self, command: MockCommand) -> Result; + /// `Value::Queued` can be used to return a value that will work almost anywhere. + fn process_command(&self, command: MockCommand) -> Result; - /// Intercept and process an entire transaction. The provided commands will **not** include `MULTI` or `EXEC`. + /// Intercept and process an entire transaction. The provided commands will **not** include `EXEC`. /// /// Note: The default implementation redirects each command to the [process_command](Self::process_command) /// function. The results of each call are buffered and returned as an array. - fn process_transaction(&self, commands: Vec) -> Result { + fn process_transaction(&self, commands: Vec) -> Result { let mut out = Vec::with_capacity(commands.len()); for command in commands.into_iter() { out.push(self.process_command(command)?); } - Ok(RedisValue::Array(out)) + Ok(Value::Array(out)) } } @@ -76,14 +76,14 @@ pub trait Mocks: Debug + Send + Sync + 'static { /// # use fred::prelude::*; /// #[tokio::test] /// async fn should_use_echo_mock() { -/// let config = RedisConfig { +/// let config = Config { /// mocks: Some(Arc::new(Echo)), /// ..Default::default() /// }; /// let client = Builder::from_config(config).build().unwrap(); /// client.init().await.expect("Failed to connect"); /// -/// let actual: Vec = client +/// let actual: Vec = client /// .set( /// "foo", /// "bar", @@ -94,7 +94,7 @@ pub trait Mocks: Debug + Send + Sync + 'static { /// .await /// .expect("Failed to call SET"); /// -/// let expected: Vec = vec![ +/// let expected: Vec = vec![ /// "foo".into(), /// "bar".into(), /// "EX".into(), @@ -108,8 +108,8 @@ pub trait Mocks: Debug + Send + Sync + 'static { pub struct Echo; impl Mocks for Echo { - fn 
process_command(&self, command: MockCommand) -> Result { - Ok(RedisValue::Array(command.args)) + fn process_command(&self, command: MockCommand) -> Result { + Ok(Value::Array(command.args)) } } @@ -121,7 +121,7 @@ impl Mocks for Echo { /// ```rust no_run /// #[tokio::test] /// async fn should_use_echo_mock() { -/// let config = RedisConfig { +/// let config = Config { /// mocks: Some(Arc::new(SimpleMap::new())), /// ..Default::default() /// }; @@ -140,7 +140,7 @@ impl Mocks for Echo { /// ``` #[derive(Debug)] pub struct SimpleMap { - values: Mutex>, + values: Mutex>, } impl SimpleMap { @@ -157,48 +157,48 @@ impl SimpleMap { } /// Take the inner map. - pub fn take(&self) -> HashMap { + pub fn take(&self) -> HashMap { self.values.lock().drain().collect() } /// Read a copy of the inner map. - pub fn inner(&self) -> HashMap { + pub fn inner(&self) -> HashMap { self.values.lock().iter().map(|(k, v)| (k.clone(), v.clone())).collect() } /// Perform a `GET` operation. - pub fn get(&self, args: Vec) -> Result { - let key: RedisKey = match args.first() { + pub fn get(&self, args: Vec) -> Result { + let key: Key = match args.first() { Some(key) => key.clone().try_into()?, - None => return Err(RedisError::new(RedisErrorKind::InvalidArgument, "Missing key.")), + None => return Err(Error::new(ErrorKind::InvalidArgument, "Missing key.")), }; - Ok(self.values.lock().get(&key).cloned().unwrap_or(RedisValue::Null)) + Ok(self.values.lock().get(&key).cloned().unwrap_or(Value::Null)) } /// Perform a `SET` operation. 
- pub fn set(&self, mut args: Vec) -> Result { + pub fn set(&self, mut args: Vec) -> Result { args.reverse(); - let key: RedisKey = match args.pop() { + let key: Key = match args.pop() { Some(key) => key.try_into()?, - None => return Err(RedisError::new(RedisErrorKind::InvalidArgument, "Missing key.")), + None => return Err(Error::new(ErrorKind::InvalidArgument, "Missing key.")), }; let value = match args.pop() { Some(value) => value, - None => return Err(RedisError::new(RedisErrorKind::InvalidArgument, "Missing value.")), + None => return Err(Error::new(ErrorKind::InvalidArgument, "Missing value.")), }; let _ = self.values.lock().insert(key, value); - Ok(RedisValue::new_ok()) + Ok(Value::new_ok()) } /// Perform a `DEL` operation. - pub fn del(&self, args: Vec) -> Result { + pub fn del(&self, args: Vec) -> Result { let mut guard = self.values.lock(); let mut count = 0; for arg in args.into_iter() { - let key: RedisKey = arg.try_into()?; + let key: Key = arg.try_into()?; if guard.remove(&key).is_some() { count += 1; } @@ -209,12 +209,12 @@ impl SimpleMap { } impl Mocks for SimpleMap { - fn process_command(&self, command: MockCommand) -> Result { + fn process_command(&self, command: MockCommand) -> Result { match &*command.cmd { "GET" => self.get(command.args), "SET" => self.set(command.args), "DEL" => self.del(command.args), - _ => Err(RedisError::new(RedisErrorKind::Unknown, "Unimplemented.")), + _ => Err(Error::new(ErrorKind::Unknown, "Unimplemented.")), } } } @@ -225,7 +225,7 @@ impl Mocks for SimpleMap { /// #[tokio::test] /// async fn should_use_buffer_mock() { /// let buffer = Arc::new(Buffer::new()); -/// let config = RedisConfig { +/// let config = Config { /// mocks: Some(buffer.clone()), /// ..Default::default() /// }; @@ -241,7 +241,7 @@ impl Mocks for SimpleMap { /// let actual: String = client.get("foo").await.expect("Failed to call GET"); /// assert_eq!(actual, "QUEUED"); /// -/// // note: values that act as keys use the `RedisValue::Bytes` variant 
internally +/// // note: values that act as keys use the `Value::Bytes` variant internally /// let expected = vec![ /// MockCommand { /// cmd: "SET".into(), @@ -312,9 +312,9 @@ impl Buffer { } impl Mocks for Buffer { - fn process_command(&self, command: MockCommand) -> Result { + fn process_command(&self, command: MockCommand) -> Result { self.push_back(command); - Ok(RedisValue::Queued) + Ok(Value::Queued) } } @@ -323,22 +323,22 @@ impl Mocks for Buffer { mod tests { use super::*; use crate::{ - clients::RedisClient, - error::RedisError, + clients::Client, + error::Error, interfaces::{ClientLike, KeysInterface}, mocks::{Buffer, Echo, Mocks, SimpleMap}, prelude::Expiration, runtime::JoinHandle, - types::{RedisConfig, RedisValue, SetOptions}, + types::{config::Config, SetOptions, Value}, }; use std::sync::Arc; - async fn create_mock_client(mocks: Arc) -> (RedisClient, JoinHandle>) { - let config = RedisConfig { + async fn create_mock_client(mocks: Arc) -> (Client, JoinHandle>) { + let config = Config { mocks: Some(mocks), ..Default::default() }; - let client = RedisClient::new(config, None, None, None); + let client = Client::new(config, None, None, None); let jh = client.connect(); client.wait_for_connect().await.expect("Failed to connect"); @@ -354,12 +354,12 @@ mod tests { async fn should_use_echo_mock() { let (client, _) = create_mock_client(Arc::new(Echo)).await; - let actual: Vec = client + let actual: Vec = client .set("foo", "bar", Some(Expiration::EX(100)), Some(SetOptions::NX), false) .await .expect("Failed to call SET"); - let expected: Vec = vec!["foo".into(), "bar".into(), "EX".into(), 100.into(), "NX".into()]; + let expected: Vec = vec!["foo".into(), "bar".into(), "EX".into(), 100.into(), "NX".into()]; assert_eq!(actual, expected); } diff --git a/src/modules/response.rs b/src/modules/response.rs index 021359f0..035ac059 100644 --- a/src/modules/response.rs +++ b/src/modules/response.rs @@ -1,6 +1,6 @@ use crate::{ - error::{RedisError, RedisErrorKind}, 
- types::{RedisKey, RedisValue, QUEUED}, + error::{Error, ErrorKind}, + types::{Key, Value, QUEUED}, }; use bytes::Bytes; use bytes_utils::Str; @@ -10,9 +10,9 @@ use std::{ }; #[cfg(feature = "i-cluster")] -use crate::types::ClusterInfo; +use crate::types::cluster::ClusterInfo; #[cfg(feature = "i-geo")] -use crate::types::GeoPosition; +use crate::types::geo::GeoPosition; #[cfg(feature = "i-slowlog")] use crate::types::SlowlogEntry; #[cfg(feature = "i-memory")] @@ -21,9 +21,6 @@ use crate::types::{DatabaseMemoryStats, MemoryStats}; #[allow(unused_imports)] use std::any::type_name; -#[cfg(feature = "serde-json")] -use serde_json::{Map, Value}; - macro_rules! debug_type( ($($arg:tt)*) => { #[cfg(feature="network-logs")] @@ -47,27 +44,27 @@ macro_rules! check_single_bulk_reply( macro_rules! to_signed_number( ($t:ty, $v:expr) => { match $v { - RedisValue::Double(f) => Ok(f as $t), - RedisValue::Integer(i) => Ok(i as $t), - RedisValue::String(s) => s.parse::<$t>().map_err(|e| e.into()), - RedisValue::Array(mut a) => if a.len() == 1 { + Value::Double(f) => Ok(f as $t), + Value::Integer(i) => Ok(i as $t), + Value::String(s) => s.parse::<$t>().map_err(|e| e.into()), + Value::Array(mut a) => if a.len() == 1 { match a.pop().unwrap() { - RedisValue::Integer(i) => Ok(i as $t), - RedisValue::String(s) => s.parse::<$t>().map_err(|e| e.into()), + Value::Integer(i) => Ok(i as $t), + Value::String(s) => s.parse::<$t>().map_err(|e| e.into()), #[cfg(feature = "default-nil-types")] - RedisValue::Null => Ok(0), + Value::Null => Ok(0), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => Err(RedisError::new(RedisErrorKind::NotFound, "Cannot convert nil to number.")), - _ => Err(RedisError::new_parse("Cannot convert to number.")) + Value::Null => Err(Error::new(ErrorKind::NotFound, "Cannot convert nil to number.")), + _ => Err(Error::new_parse("Cannot convert to number.")) } }else{ - Err(RedisError::new_parse("Cannot convert array to number.")) + Err(Error::new_parse("Cannot 
convert array to number.")) } #[cfg(feature = "default-nil-types")] - RedisValue::Null => Ok(0), + Value::Null => Ok(0), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => Err(RedisError::new(RedisErrorKind::NotFound, "Cannot convert nil to number.")), - _ => Err(RedisError::new_parse("Cannot convert to number.")), + Value::Null => Err(Error::new(ErrorKind::NotFound, "Cannot convert nil to number.")), + _ => Err(Error::new_parse("Cannot convert to number.")), } } ); @@ -75,47 +72,47 @@ macro_rules! to_signed_number( macro_rules! to_unsigned_number( ($t:ty, $v:expr) => { match $v { - RedisValue::Double(f) => if f.is_sign_negative() { - Err(RedisError::new_parse("Cannot convert from negative number.")) + Value::Double(f) => if f.is_sign_negative() { + Err(Error::new_parse("Cannot convert from negative number.")) }else{ Ok(f as $t) }, - RedisValue::Integer(i) => if i < 0 { - Err(RedisError::new_parse("Cannot convert from negative number.")) + Value::Integer(i) => if i < 0 { + Err(Error::new_parse("Cannot convert from negative number.")) }else{ Ok(i as $t) }, - RedisValue::String(s) => s.parse::<$t>().map_err(|e| e.into()), - RedisValue::Array(mut a) => if a.len() == 1 { + Value::String(s) => s.parse::<$t>().map_err(|e| e.into()), + Value::Array(mut a) => if a.len() == 1 { match a.pop().unwrap() { - RedisValue::Integer(i) => if i < 0 { - Err(RedisError::new_parse("Cannot convert from negative number.")) + Value::Integer(i) => if i < 0 { + Err(Error::new_parse("Cannot convert from negative number.")) }else{ Ok(i as $t) }, #[cfg(feature = "default-nil-types")] - RedisValue::Null => Ok(0), + Value::Null => Ok(0), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => Err(RedisError::new(RedisErrorKind::NotFound, "Cannot convert nil to number.")), - RedisValue::String(s) => s.parse::<$t>().map_err(|e| e.into()), - _ => Err(RedisError::new_parse("Cannot convert to number.")) + Value::Null => Err(Error::new(ErrorKind::NotFound, "Cannot convert nil to 
number.")), + Value::String(s) => s.parse::<$t>().map_err(|e| e.into()), + _ => Err(Error::new_parse("Cannot convert to number.")) } }else{ - Err(RedisError::new_parse("Cannot convert array to number.")) + Err(Error::new_parse("Cannot convert array to number.")) }, #[cfg(feature = "default-nil-types")] - RedisValue::Null => Ok(0), + Value::Null => Ok(0), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => Err(RedisError::new(RedisErrorKind::NotFound, "Cannot convert nil to number.")), - _ => Err(RedisError::new_parse("Cannot convert to number.")), + Value::Null => Err(Error::new(ErrorKind::NotFound, "Cannot convert nil to number.")), + _ => Err(Error::new_parse("Cannot convert to number.")), } } ); macro_rules! impl_signed_number ( ($t:ty) => { - impl FromRedis for $t { - fn from_value(value: RedisValue) -> Result<$t, RedisError> { + impl FromValue for $t { + fn from_value(value: Value) -> Result<$t, Error> { check_single_bulk_reply!(value); to_signed_number!($t, value) } @@ -125,8 +122,8 @@ macro_rules! impl_signed_number ( macro_rules! impl_unsigned_number ( ($t:ty) => { - impl FromRedis for $t { - fn from_value(value: RedisValue) -> Result<$t, RedisError> { + impl FromValue for $t { + fn from_value(value: Value) -> Result<$t, Error> { check_single_bulk_reply!(value); to_unsigned_number!($t, value) } @@ -134,24 +131,24 @@ macro_rules! impl_unsigned_number ( } ); -/// A trait used to [convert](RedisValue::convert) various forms of [RedisValue](RedisValue) into different types. +/// A trait used to [convert](Value::convert) various forms of [Value](Value) into different types. 
/// /// ## Examples /// /// ```rust -/// # use fred::types::RedisValue; +/// # use fred::types::Value; /// # use std::collections::HashMap; -/// let foo: usize = RedisValue::String("123".into()).convert()?; -/// let foo: i64 = RedisValue::String("123".into()).convert()?; -/// let foo: String = RedisValue::String("123".into()).convert()?; -/// let foo: Vec = RedisValue::Bytes(vec![102, 111, 111].into()).convert()?; -/// let foo: Vec = RedisValue::String("foo".into()).convert()?; -/// let foo: Vec = RedisValue::Array(vec!["a".into(), "b".into()]).convert()?; +/// let foo: usize = Value::String("123".into()).convert()?; +/// let foo: i64 = Value::String("123".into()).convert()?; +/// let foo: String = Value::String("123".into()).convert()?; +/// let foo: Vec = Value::Bytes(vec![102, 111, 111].into()).convert()?; +/// let foo: Vec = Value::String("foo".into()).convert()?; +/// let foo: Vec = Value::Array(vec!["a".into(), "b".into()]).convert()?; /// let foo: HashMap = -/// RedisValue::Array(vec!["a".into(), 1.into(), "b".into(), 2.into()]).convert()?; -/// let foo: (String, i64) = RedisValue::Array(vec!["a".into(), 1.into()]).convert()?; +/// Value::Array(vec!["a".into(), 1.into(), "b".into(), 2.into()]).convert()?; +/// let foo: (String, i64) = Value::Array(vec!["a".into(), 1.into()]).convert()?; /// let foo: Vec<(String, i64)> = -/// RedisValue::Array(vec!["a".into(), 1.into(), "b".into(), 2.into()]).convert()?; +/// Value::Array(vec!["a".into(), 1.into(), "b".into(), 2.into()]).convert()?; /// // ... /// ``` /// @@ -164,13 +161,13 @@ macro_rules! 
impl_unsigned_number ( /// For example: /// /// ```rust -/// # use fred::types::RedisValue; -/// let _: String = RedisValue::Array(vec![]).convert()?; // error -/// let _: String = RedisValue::Array(vec!["a".into()]).convert()?; // "a" -/// let _: String = RedisValue::Array(vec!["a".into(), "b".into()]).convert()?; // error -/// let _: Option = RedisValue::Array(vec![]).convert()?; // None -/// let _: Option = RedisValue::Array(vec!["a".into()]).convert()?; // Some("a") -/// let _: Option = RedisValue::Array(vec!["a".into(), "b".into()]).convert()?; // error +/// # use fred::types::Value; +/// let _: String = Value::Array(vec![]).convert()?; // error +/// let _: String = Value::Array(vec!["a".into()]).convert()?; // "a" +/// let _: String = Value::Array(vec!["a".into(), "b".into()]).convert()?; // error +/// let _: Option = Value::Array(vec![]).convert()?; // None +/// let _: Option = Value::Array(vec!["a".into()]).convert()?; // Some("a") +/// let _: Option = Value::Array(vec!["a".into(), "b".into()]).convert()?; // error /// ``` /// /// ## The `default-nil-types` Feature Flag @@ -180,18 +177,18 @@ macro_rules! impl_unsigned_number ( /// `nil`. /// /// The `default-nil-types` feature flag can enable some further type conversion branches that treat `nil` values as -/// default values for the relevant type. For `RedisValue::Null` these include: +/// default values for the relevant type. For `Value::Null` these include: /// -/// * `impl FromRedis` for `String` or `Str` returns an empty string. -/// * `impl FromRedis` for `Bytes` or `Vec` returns an empty array. -/// * `impl FromRedis` for any integer or float type returns `0` -/// * `impl FromRedis` for `bool` returns `false` -/// * `impl FromRedis` for map or set types return an empty map or set. -pub trait FromRedis: Sized { - fn from_value(value: RedisValue) -> Result; +/// * `impl FromValue` for `String` or `Str` returns an empty string. +/// * `impl FromValue` for `Bytes` or `Vec` returns an empty array. 
+/// * `impl FromValue` for any integer or float type returns `0` +/// * `impl FromValue` for `bool` returns `false` +/// * `impl FromValue` for map or set types return an empty map or set. +pub trait FromValue: Sized { + fn from_value(value: Value) -> Result; #[doc(hidden)] - fn from_values(values: Vec) -> Result, RedisError> { + fn from_values(values: Vec) -> Result, Error> { values.into_iter().map(|v| Self::from_value(v)).collect() } @@ -206,14 +203,14 @@ pub trait FromRedis: Sized { } } -impl FromRedis for RedisValue { - fn from_value(value: RedisValue) -> Result { +impl FromValue for Value { + fn from_value(value: Value) -> Result { Ok(value) } } -impl FromRedis for () { - fn from_value(_: RedisValue) -> Result { +impl FromValue for () { + fn from_value(_: Value) -> Result { Ok(()) } } @@ -225,8 +222,8 @@ impl_signed_number!(i64); impl_signed_number!(i128); impl_signed_number!(isize); -impl FromRedis for u8 { - fn from_value(value: RedisValue) -> Result { +impl FromValue for u8 { + fn from_value(value: Value) -> Result { check_single_bulk_reply!(value); to_unsigned_number!(u8, value) } @@ -242,54 +239,52 @@ impl_unsigned_number!(u64); impl_unsigned_number!(u128); impl_unsigned_number!(usize); -impl FromRedis for String { - fn from_value(value: RedisValue) -> Result { - debug_type!("FromRedis(String): {:?}", value); +impl FromValue for String { + fn from_value(value: Value) -> Result { + debug_type!("FromValue(String): {:?}", value); check_single_bulk_reply!(value); value .into_string() - .ok_or(RedisError::new_parse("Could not convert to string.")) + .ok_or(Error::new_parse("Could not convert to string.")) } } -impl FromRedis for Str { - fn from_value(value: RedisValue) -> Result { - debug_type!("FromRedis(Str): {:?}", value); +impl FromValue for Str { + fn from_value(value: Value) -> Result { + debug_type!("FromValue(Str): {:?}", value); check_single_bulk_reply!(value); value .into_bytes_str() - .ok_or(RedisError::new_parse("Could not convert to string.")) + 
.ok_or(Error::new_parse("Could not convert to string.")) } } -impl FromRedis for f64 { - fn from_value(value: RedisValue) -> Result { - debug_type!("FromRedis(f64): {:?}", value); +impl FromValue for f64 { + fn from_value(value: Value) -> Result { + debug_type!("FromValue(f64): {:?}", value); check_single_bulk_reply!(value); - value - .as_f64() - .ok_or(RedisError::new_parse("Could not convert to double.")) + value.as_f64().ok_or(Error::new_parse("Could not convert to double.")) } } -impl FromRedis for f32 { - fn from_value(value: RedisValue) -> Result { - debug_type!("FromRedis(f32): {:?}", value); +impl FromValue for f32 { + fn from_value(value: Value) -> Result { + debug_type!("FromValue(f32): {:?}", value); check_single_bulk_reply!(value); value .as_f64() .map(|f| f as f32) - .ok_or(RedisError::new_parse("Could not convert to float.")) + .ok_or(Error::new_parse("Could not convert to float.")) } } -impl FromRedis for bool { - fn from_value(value: RedisValue) -> Result { - debug_type!("FromRedis(bool): {:?}", value); +impl FromValue for bool { + fn from_value(value: Value) -> Result { + debug_type!("FromValue(bool): {:?}", value); check_single_bulk_reply!(value); if let Some(val) = value.as_bool() { @@ -298,21 +293,21 @@ impl FromRedis for bool { // it's not obvious how to convert the value to a bool in this block, so we go with a // tried and true approach that i'm sure we'll never regret - JS semantics Ok(match value { - RedisValue::String(s) => !s.is_empty(), - RedisValue::Bytes(b) => !b.is_empty(), + Value::String(s) => !s.is_empty(), + Value::Bytes(b) => !b.is_empty(), // everything else should be covered by `as_bool` above - _ => return Err(RedisError::new_parse("Could not convert to bool.")), + _ => return Err(Error::new_parse("Could not convert to bool.")), }) } } } -impl FromRedis for Option +impl FromValue for Option where - T: FromRedis, + T: FromValue, { - fn from_value(value: RedisValue) -> Result, RedisError> { - debug_type!("FromRedis(Option<{}>): 
{:?}", type_name::(), value); + fn from_value(value: Value) -> Result, Error> { + debug_type!("FromValue(Option<{}>): {:?}", type_name::(), value); if let Some(0) = value.array_len() { Ok(None) @@ -324,40 +319,36 @@ where } } -impl FromRedis for Bytes { - fn from_value(value: RedisValue) -> Result { - debug_type!("FromRedis(Bytes): {:?}", value); +impl FromValue for Bytes { + fn from_value(value: Value) -> Result { + debug_type!("FromValue(Bytes): {:?}", value); check_single_bulk_reply!(value); - value - .into_bytes() - .ok_or(RedisError::new_parse("Cannot parse into bytes.")) + value.into_bytes().ok_or(Error::new_parse("Cannot parse into bytes.")) } } -impl FromRedis for Vec +impl FromValue for Vec where - T: FromRedis, + T: FromValue, { - fn from_value(value: RedisValue) -> Result, RedisError> { - debug_type!("FromRedis(Vec<{}>): {:?}", type_name::(), value); + fn from_value(value: Value) -> Result, Error> { + debug_type!("FromValue(Vec<{}>): {:?}", type_name::(), value); match value { - RedisValue::Bytes(bytes) => { - T::from_owned_bytes(bytes.to_vec()).ok_or(RedisError::new_parse("Cannot convert from bytes")) - }, - RedisValue::String(string) => { + Value::Bytes(bytes) => T::from_owned_bytes(bytes.to_vec()).ok_or(Error::new_parse("Cannot convert from bytes")), + Value::String(string) => { // hacky way to check if T is bytes without consuming `string` if T::from_owned_bytes(Vec::new()).is_some() { T::from_owned_bytes(string.into_inner().to_vec()) - .ok_or(RedisError::new_parse("Could not convert string to bytes.")) + .ok_or(Error::new_parse("Could not convert string to bytes.")) } else { - Ok(vec![T::from_value(RedisValue::String(string))?]) + Ok(vec![T::from_value(Value::String(string))?]) } }, - RedisValue::Array(values) => { + Value::Array(values) => { if !values.is_empty() { - if let RedisValue::Array(_) = &values[0] { + if let Value::Array(_) = &values[0] { values.into_iter().map(|x| T::from_value(x)).collect() } else { T::from_values(values) @@ -366,13 
+357,13 @@ where Ok(vec![]) } }, - RedisValue::Map(map) => { + Value::Map(map) => { // not being able to use collect() here is unfortunate let out = Vec::with_capacity(map.len() * 2); map.inner().into_iter().try_fold(out, |mut out, (key, value)| { if T::is_tuple() { // try to convert to a 2-element tuple since that's a common use case from `HGETALL`, etc - out.push(T::from_value(RedisValue::Array(vec![key.into(), value]))?); + out.push(T::from_value(Value::Array(vec![key.into(), value]))?); } else { out.push(T::from_value(key.into())?); out.push(T::from_value(value)?); @@ -381,40 +372,40 @@ where Ok(out) }) }, - RedisValue::Integer(i) => Ok(vec![T::from_value(RedisValue::Integer(i))?]), - RedisValue::Double(f) => Ok(vec![T::from_value(RedisValue::Double(f))?]), - RedisValue::Boolean(b) => Ok(vec![T::from_value(RedisValue::Boolean(b))?]), - RedisValue::Queued => Ok(vec![T::from_value(RedisValue::from_static_str(QUEUED))?]), - RedisValue::Null => Ok(Vec::new()), + Value::Integer(i) => Ok(vec![T::from_value(Value::Integer(i))?]), + Value::Double(f) => Ok(vec![T::from_value(Value::Double(f))?]), + Value::Boolean(b) => Ok(vec![T::from_value(Value::Boolean(b))?]), + Value::Queued => Ok(vec![T::from_value(Value::from_static_str(QUEUED))?]), + Value::Null => Ok(Vec::new()), } } } -impl FromRedis for [T; N] +impl FromValue for [T; N] where - T: FromRedis, + T: FromValue, { - fn from_value(value: RedisValue) -> Result<[T; N], RedisError> { - debug_type!("FromRedis([{}; {}]): {:?}", type_name::(), N, value); + fn from_value(value: Value) -> Result<[T; N], Error> { + debug_type!("FromValue([{}; {}]): {:?}", type_name::(), N, value); // use the `from_value` impl for Vec let value: Vec = value.convert()?; let len = value.len(); value .try_into() - .map_err(|_| RedisError::new_parse(format!("Failed to convert to array. Expected {}, found {}.", N, len))) + .map_err(|_| Error::new_parse(format!("Failed to convert to array. 
Expected {}, found {}.", N, len))) } } -impl FromRedis for HashMap +impl FromValue for HashMap where - K: FromRedisKey + Eq + Hash, - V: FromRedis, + K: FromKey + Eq + Hash, + V: FromValue, S: BuildHasher + Default, { - fn from_value(value: RedisValue) -> Result { + fn from_value(value: Value) -> Result { debug_type!( - "FromRedis(HashMap<{}, {}>): {:?}", + "FromValue(HashMap<{}, {}>): {:?}", type_name::(), type_name::(), value @@ -423,9 +414,9 @@ where let as_map = if value.is_array() || value.is_map() || value.is_null() { value .into_map() - .map_err(|_| RedisError::new_parse("Cannot convert to map."))? + .map_err(|_| Error::new_parse("Cannot convert to map."))? } else { - return Err(RedisError::new_parse("Cannot convert to map.")); + return Err(Error::new_parse("Cannot convert to map.")); }; as_map @@ -436,25 +427,25 @@ where } } -impl FromRedis for HashSet +impl FromValue for HashSet where - V: FromRedis + Hash + Eq, + V: FromValue + Hash + Eq, S: BuildHasher + Default, { - fn from_value(value: RedisValue) -> Result { - debug_type!("FromRedis(HashSet<{}>): {:?}", type_name::(), value); + fn from_value(value: Value) -> Result { + debug_type!("FromValue(HashSet<{}>): {:?}", type_name::(), value); value.into_set()?.into_iter().map(|v| V::from_value(v)).collect() } } -impl FromRedis for BTreeMap +impl FromValue for BTreeMap where - K: FromRedisKey + Ord, - V: FromRedis, + K: FromKey + Ord, + V: FromValue, { - fn from_value(value: RedisValue) -> Result { + fn from_value(value: Value) -> Result { debug_type!( - "FromRedis(BTreeMap<{}, {}>): {:?}", + "FromValue(BTreeMap<{}, {}>): {:?}", type_name::(), type_name::(), value @@ -462,9 +453,9 @@ where let as_map = if value.is_array() || value.is_map() || value.is_null() { value .into_map() - .map_err(|_| RedisError::new_parse("Cannot convert to map."))? + .map_err(|_| Error::new_parse("Cannot convert to map."))? 
} else { - return Err(RedisError::new_parse("Cannot convert to map.")); + return Err(Error::new_parse("Cannot convert to map.")); }; as_map @@ -475,55 +466,55 @@ where } } -impl FromRedis for BTreeSet +impl FromValue for BTreeSet where - V: FromRedis + Ord, + V: FromValue + Ord, { - fn from_value(value: RedisValue) -> Result { - debug_type!("FromRedis(BTreeSet<{}>): {:?}", type_name::(), value); + fn from_value(value: Value) -> Result { + debug_type!("FromValue(BTreeSet<{}>): {:?}", type_name::(), value); value.into_set()?.into_iter().map(|v| V::from_value(v)).collect() } } // adapted from mitsuhiko -macro_rules! impl_from_redis_tuple { +macro_rules! impl_from_value_tuple { () => (); ($($name:ident,)+) => ( #[doc(hidden)] - impl<$($name: FromRedis),*> FromRedis for ($($name,)*) { + impl<$($name: FromValue),*> FromValue for ($($name,)*) { fn is_tuple() -> bool { true } #[allow(non_snake_case, unused_variables)] - fn from_value(v: RedisValue) -> Result<($($name,)*), RedisError> { - if let RedisValue::Array(mut values) = v { + fn from_value(v: Value) -> Result<($($name,)*), Error> { + if let Value::Array(mut values) = v { let mut n = 0; $(let $name = (); n += 1;)* - debug_type!("FromRedis({}-tuple): {:?}", n, values); + debug_type!("FromValue({}-tuple): {:?}", n, values); if values.len() != n { - return Err(RedisError::new_parse(format!("Invalid tuple dimension. Expected {}, found {}.", n, values.len()))); + return Err(Error::new_parse(format!("Invalid tuple dimension. Expected {}, found {}.", n, values.len()))); } // since we have ownership over the values we have some freedom in how to implement this values.reverse(); Ok(($({let $name = (); values .pop() - .ok_or(RedisError::new_parse("Expected value, found none."))? + .ok_or(Error::new_parse("Expected value, found none."))? .convert()? 
},)*)) }else{ - Err(RedisError::new_parse("Could not convert to tuple.")) + Err(Error::new_parse("Could not convert to tuple.")) } } #[allow(non_snake_case, unused_variables)] - fn from_values(mut values: Vec) -> Result, RedisError> { + fn from_values(mut values: Vec) -> Result, Error> { let mut n = 0; $(let $name = (); n += 1;)* - debug_type!("FromRedis({}-tuple): {:?}", n, values); + debug_type!("FromValue({}-tuple): {:?}", n, values); if values.len() % n != 0 { - return Err(RedisError::new_parse(format!("Invalid tuple dimension. Expected {}, found {}.", n, values.len()))); + return Err(Error::new_parse(format!("Invalid tuple dimension. Expected {}, found {}.", n, values.len()))); } let mut out = Vec::with_capacity(values.len() / n); @@ -538,24 +529,24 @@ macro_rules! impl_from_redis_tuple { Ok(out) } } - impl_from_redis_peel!($($name,)*); + impl_from_value_peel!($($name,)*); ) } -macro_rules! impl_from_redis_peel { - ($name:ident, $($other:ident,)*) => (impl_from_redis_tuple!($($other,)*);) +macro_rules! impl_from_value_peel { + ($name:ident, $($other:ident,)*) => (impl_from_value_tuple!($($other,)*);) } -impl_from_redis_tuple! { T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, } +impl_from_value_tuple! { T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, } -macro_rules! impl_from_str_from_redis_key ( +macro_rules! impl_from_str_from_key ( ($t:ty) => { - impl FromRedisKey for $t { - fn from_key(value: RedisKey) -> Result<$t, RedisError> { + impl FromKey for $t { + fn from_key(value: Key) -> Result<$t, Error> { value .as_str() .and_then(|k| k.parse::<$t>().ok()) - .ok_or(RedisError::new_parse("Cannot parse key from bytes.")) + .ok_or(Error::new_parse("Cannot parse key from bytes.")) } } } @@ -563,40 +554,40 @@ macro_rules! 
impl_from_str_from_redis_key ( #[cfg(feature = "serde-json")] #[cfg_attr(docsrs, doc(cfg(feature = "serde-json")))] -impl FromRedis for Value { - fn from_value(value: RedisValue) -> Result { +impl FromValue for serde_json::Value { + fn from_value(value: Value) -> Result { let value = match value { - RedisValue::Null => Value::Null, - RedisValue::Queued => QUEUED.into(), - RedisValue::String(s) => { + Value::Null => serde_json::Value::Null, + Value::Queued => QUEUED.into(), + Value::String(s) => { // check for nested json. this is particularly useful with JSON.GET serde_json::from_str(&s).ok().unwrap_or_else(|| s.to_string().into()) }, - RedisValue::Bytes(b) => { - let val = RedisValue::String(Str::from_inner(b)?); + Value::Bytes(b) => { + let val = Value::String(Str::from_inner(b)?); Self::from_value(val)? }, - RedisValue::Integer(i) => i.into(), - RedisValue::Double(f) => f.into(), - RedisValue::Boolean(b) => b.into(), - RedisValue::Array(v) => { + Value::Integer(i) => i.into(), + Value::Double(f) => f.into(), + Value::Boolean(b) => b.into(), + Value::Array(v) => { let mut out = Vec::with_capacity(v.len()); for value in v.into_iter() { out.push(Self::from_value(value)?); } - Value::Array(out) + serde_json::Value::Array(out) }, - RedisValue::Map(v) => { - let mut out = Map::with_capacity(v.len()); + Value::Map(v) => { + let mut out = serde_json::Map::with_capacity(v.len()); for (key, value) in v.inner().into_iter() { let key = key .into_string() - .ok_or(RedisError::new_parse("Cannot convert key to string."))?; + .ok_or(Error::new_parse("Cannot convert key to string."))?; let value = Self::from_value(value)?; out.insert(key, value); } - Value::Object(out) + serde_json::Value::Object(out) }, }; @@ -606,283 +597,281 @@ impl FromRedis for Value { #[cfg(feature = "i-geo")] #[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] -impl FromRedis for GeoPosition { - fn from_value(value: RedisValue) -> Result { +impl FromValue for GeoPosition { + fn from_value(value: Value) -> 
Result { GeoPosition::try_from(value) } } #[cfg(feature = "i-slowlog")] #[cfg_attr(docsrs, doc(cfg(feature = "i-slowlog")))] -impl FromRedis for SlowlogEntry { - fn from_value(value: RedisValue) -> Result { +impl FromValue for SlowlogEntry { + fn from_value(value: Value) -> Result { SlowlogEntry::try_from(value) } } #[cfg(feature = "i-cluster")] #[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] -impl FromRedis for ClusterInfo { - fn from_value(value: RedisValue) -> Result { +impl FromValue for ClusterInfo { + fn from_value(value: Value) -> Result { ClusterInfo::try_from(value) } } #[cfg(feature = "i-memory")] #[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] -impl FromRedis for MemoryStats { - fn from_value(value: RedisValue) -> Result { +impl FromValue for MemoryStats { + fn from_value(value: Value) -> Result { MemoryStats::try_from(value) } } #[cfg(feature = "i-memory")] #[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] -impl FromRedis for DatabaseMemoryStats { - fn from_value(value: RedisValue) -> Result { +impl FromValue for DatabaseMemoryStats { + fn from_value(value: Value) -> Result { DatabaseMemoryStats::try_from(value) } } -impl FromRedis for RedisKey { - fn from_value(value: RedisValue) -> Result { +impl FromValue for Key { + fn from_value(value: Value) -> Result { let key = match value { - RedisValue::Boolean(b) => b.into(), - RedisValue::Integer(i) => i.into(), - RedisValue::Double(f) => f.into(), - RedisValue::String(s) => s.into(), - RedisValue::Bytes(b) => b.into(), - RedisValue::Queued => RedisKey::from_static_str(QUEUED), - RedisValue::Map(_) | RedisValue::Array(_) => { - return Err(RedisError::new_parse("Cannot convert aggregate type to key.")) - }, - RedisValue::Null => return Err(RedisError::new(RedisErrorKind::NotFound, "Cannot convert nil to key.")), + Value::Boolean(b) => b.into(), + Value::Integer(i) => i.into(), + Value::Double(f) => f.into(), + Value::String(s) => s.into(), + Value::Bytes(b) => b.into(), + Value::Queued => 
Key::from_static_str(QUEUED), + Value::Map(_) | Value::Array(_) => return Err(Error::new_parse("Cannot convert aggregate type to key.")), + Value::Null => return Err(Error::new(ErrorKind::NotFound, "Cannot convert nil to key.")), }; Ok(key) } } -/// A trait used to convert [RedisKey](crate::types::RedisKey) values to various types. +/// A trait used to convert [Key](crate::types::Key) values to various types. /// -/// See the [convert](crate::types::RedisKey::convert) documentation for more information. -pub trait FromRedisKey: Sized { - fn from_key(value: RedisKey) -> Result; -} - -impl_from_str_from_redis_key!(u8); -impl_from_str_from_redis_key!(u16); -impl_from_str_from_redis_key!(u32); -impl_from_str_from_redis_key!(u64); -impl_from_str_from_redis_key!(u128); -impl_from_str_from_redis_key!(usize); -impl_from_str_from_redis_key!(i8); -impl_from_str_from_redis_key!(i16); -impl_from_str_from_redis_key!(i32); -impl_from_str_from_redis_key!(i64); -impl_from_str_from_redis_key!(i128); -impl_from_str_from_redis_key!(isize); -impl_from_str_from_redis_key!(f32); -impl_from_str_from_redis_key!(f64); - -impl FromRedisKey for () { - fn from_key(_: RedisKey) -> Result { +/// See the [convert](crate::types::Key::convert) documentation for more information. 
+pub trait FromKey: Sized { + fn from_key(value: Key) -> Result; +} + +impl_from_str_from_key!(u8); +impl_from_str_from_key!(u16); +impl_from_str_from_key!(u32); +impl_from_str_from_key!(u64); +impl_from_str_from_key!(u128); +impl_from_str_from_key!(usize); +impl_from_str_from_key!(i8); +impl_from_str_from_key!(i16); +impl_from_str_from_key!(i32); +impl_from_str_from_key!(i64); +impl_from_str_from_key!(i128); +impl_from_str_from_key!(isize); +impl_from_str_from_key!(f32); +impl_from_str_from_key!(f64); + +impl FromKey for () { + fn from_key(_: Key) -> Result { Ok(()) } } -impl FromRedisKey for RedisValue { - fn from_key(value: RedisKey) -> Result { - Ok(RedisValue::Bytes(value.into_bytes())) +impl FromKey for Value { + fn from_key(value: Key) -> Result { + Ok(Value::Bytes(value.into_bytes())) } } -impl FromRedisKey for RedisKey { - fn from_key(value: RedisKey) -> Result { +impl FromKey for Key { + fn from_key(value: Key) -> Result { Ok(value) } } -impl FromRedisKey for String { - fn from_key(value: RedisKey) -> Result { +impl FromKey for String { + fn from_key(value: Key) -> Result { value .into_string() - .ok_or(RedisError::new_parse("Cannot parse key as string.")) + .ok_or(Error::new_parse("Cannot parse key as string.")) } } -impl FromRedisKey for Str { - fn from_key(value: RedisKey) -> Result { +impl FromKey for Str { + fn from_key(value: Key) -> Result { Ok(Str::from_inner(value.into_bytes())?) 
} } -impl FromRedisKey for Vec { - fn from_key(value: RedisKey) -> Result { +impl FromKey for Vec { + fn from_key(value: Key) -> Result { Ok(value.into_bytes().to_vec()) } } -impl FromRedisKey for Bytes { - fn from_key(value: RedisKey) -> Result { +impl FromKey for Bytes { + fn from_key(value: Key) -> Result { Ok(value.into_bytes()) } } #[cfg(test)] mod tests { - use crate::types::RedisValue; + use crate::types::Value; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; #[cfg(not(feature = "default-nil-types"))] - use crate::error::RedisError; + use crate::error::Error; #[test] fn should_convert_signed_numeric_types() { - let _foo: i8 = RedisValue::String("123".into()).convert().unwrap(); + let _foo: i8 = Value::String("123".into()).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: i8 = RedisValue::Integer(123).convert().unwrap(); + let _foo: i8 = Value::Integer(123).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: i16 = RedisValue::String("123".into()).convert().unwrap(); + let _foo: i16 = Value::String("123".into()).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: i16 = RedisValue::Integer(123).convert().unwrap(); + let _foo: i16 = Value::Integer(123).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: i32 = RedisValue::String("123".into()).convert().unwrap(); + let _foo: i32 = Value::String("123".into()).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: i32 = RedisValue::Integer(123).convert().unwrap(); + let _foo: i32 = Value::Integer(123).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: i64 = RedisValue::String("123".into()).convert().unwrap(); + let _foo: i64 = Value::String("123".into()).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: i64 = RedisValue::Integer(123).convert().unwrap(); + let _foo: i64 = Value::Integer(123).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: i128 = RedisValue::String("123".into()).convert().unwrap(); + let _foo: i128 = Value::String("123".into()).convert().unwrap(); 
assert_eq!(_foo, 123); - let _foo: i128 = RedisValue::Integer(123).convert().unwrap(); + let _foo: i128 = Value::Integer(123).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: isize = RedisValue::String("123".into()).convert().unwrap(); + let _foo: isize = Value::String("123".into()).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: isize = RedisValue::Integer(123).convert().unwrap(); + let _foo: isize = Value::Integer(123).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: f32 = RedisValue::String("123.5".into()).convert().unwrap(); + let _foo: f32 = Value::String("123.5".into()).convert().unwrap(); assert_eq!(_foo, 123.5); - let _foo: f64 = RedisValue::String("123.5".into()).convert().unwrap(); + let _foo: f64 = Value::String("123.5".into()).convert().unwrap(); assert_eq!(_foo, 123.5); } #[test] fn should_convert_unsigned_numeric_types() { - let _foo: u8 = RedisValue::String("123".into()).convert().unwrap(); + let _foo: u8 = Value::String("123".into()).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: u8 = RedisValue::Integer(123).convert().unwrap(); + let _foo: u8 = Value::Integer(123).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: u16 = RedisValue::String("123".into()).convert().unwrap(); + let _foo: u16 = Value::String("123".into()).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: u16 = RedisValue::Integer(123).convert().unwrap(); + let _foo: u16 = Value::Integer(123).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: u32 = RedisValue::String("123".into()).convert().unwrap(); + let _foo: u32 = Value::String("123".into()).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: u32 = RedisValue::Integer(123).convert().unwrap(); + let _foo: u32 = Value::Integer(123).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: u64 = RedisValue::String("123".into()).convert().unwrap(); + let _foo: u64 = Value::String("123".into()).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: u64 = RedisValue::Integer(123).convert().unwrap(); 
+ let _foo: u64 = Value::Integer(123).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: u128 = RedisValue::String("123".into()).convert().unwrap(); + let _foo: u128 = Value::String("123".into()).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: u128 = RedisValue::Integer(123).convert().unwrap(); + let _foo: u128 = Value::Integer(123).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: usize = RedisValue::String("123".into()).convert().unwrap(); + let _foo: usize = Value::String("123".into()).convert().unwrap(); assert_eq!(_foo, 123); - let _foo: usize = RedisValue::Integer(123).convert().unwrap(); + let _foo: usize = Value::Integer(123).convert().unwrap(); assert_eq!(_foo, 123); } #[test] #[cfg(not(feature = "default-nil-types"))] fn should_return_not_found_with_null_number_types() { - let result: Result = RedisValue::Null.convert(); + let result: Result = Value::Null.convert(); assert!(result.unwrap_err().is_not_found()); - let result: Result = RedisValue::Null.convert(); + let result: Result = Value::Null.convert(); assert!(result.unwrap_err().is_not_found()); - let result: Result = RedisValue::Null.convert(); + let result: Result = Value::Null.convert(); assert!(result.unwrap_err().is_not_found()); - let result: Result = RedisValue::Null.convert(); + let result: Result = Value::Null.convert(); assert!(result.unwrap_err().is_not_found()); - let result: Result = RedisValue::Null.convert(); + let result: Result = Value::Null.convert(); assert!(result.unwrap_err().is_not_found()); - let result: Result = RedisValue::Null.convert(); + let result: Result = Value::Null.convert(); assert!(result.unwrap_err().is_not_found()); - let result: Result = RedisValue::Null.convert(); + let result: Result = Value::Null.convert(); assert!(result.unwrap_err().is_not_found()); - let result: Result = RedisValue::Null.convert(); + let result: Result = Value::Null.convert(); assert!(result.unwrap_err().is_not_found()); - let result: Result = RedisValue::Null.convert(); + 
let result: Result = Value::Null.convert(); assert!(result.unwrap_err().is_not_found()); - let result: Result = RedisValue::Null.convert(); + let result: Result = Value::Null.convert(); assert!(result.unwrap_err().is_not_found()); - let result: Result = RedisValue::Null.convert(); + let result: Result = Value::Null.convert(); assert!(result.unwrap_err().is_not_found()); - let result: Result = RedisValue::Null.convert(); + let result: Result = Value::Null.convert(); assert!(result.unwrap_err().is_not_found()); } #[test] #[cfg(feature = "default-nil-types")] fn should_return_zero_with_null_number_types() { - assert_eq!(0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0.0, RedisValue::Null.convert::().unwrap()); - assert_eq!(0.0, RedisValue::Null.convert::().unwrap()); + assert_eq!(0, Value::Null.convert::().unwrap()); + assert_eq!(0, Value::Null.convert::().unwrap()); + assert_eq!(0, Value::Null.convert::().unwrap()); + assert_eq!(0, Value::Null.convert::().unwrap()); + assert_eq!(0, Value::Null.convert::().unwrap()); + assert_eq!(0, Value::Null.convert::().unwrap()); + assert_eq!(0, Value::Null.convert::().unwrap()); + assert_eq!(0, Value::Null.convert::().unwrap()); + assert_eq!(0, Value::Null.convert::().unwrap()); + assert_eq!(0, Value::Null.convert::().unwrap()); + assert_eq!(0, Value::Null.convert::().unwrap()); + assert_eq!(0, 
Value::Null.convert::().unwrap()); + assert_eq!(0.0, Value::Null.convert::().unwrap()); + assert_eq!(0.0, Value::Null.convert::().unwrap()); } #[test] #[cfg(feature = "default-nil-types")] fn should_convert_null_to_false() { - assert!(!RedisValue::Null.convert::().unwrap()); + assert!(!Value::Null.convert::().unwrap()); } #[test] #[should_panic] #[cfg(not(feature = "default-nil-types"))] fn should_not_convert_null_to_false() { - assert!(!RedisValue::Null.convert::().unwrap()); + assert!(!Value::Null.convert::().unwrap()); } #[test] fn should_convert_strings() { - let _foo: String = RedisValue::String("foo".into()).convert().unwrap(); + let _foo: String = Value::String("foo".into()).convert().unwrap(); assert_eq!(_foo, "foo".to_owned()); } #[test] fn should_convert_numbers_to_bools() { - let foo: bool = RedisValue::Integer(0).convert().unwrap(); + let foo: bool = Value::Integer(0).convert().unwrap(); assert!(!foo); - let foo: bool = RedisValue::Integer(1).convert().unwrap(); + let foo: bool = Value::Integer(1).convert().unwrap(); assert!(foo); - let foo: bool = RedisValue::String("0".into()).convert().unwrap(); + let foo: bool = Value::String("0".into()).convert().unwrap(); assert!(!foo); - let foo: bool = RedisValue::String("1".into()).convert().unwrap(); + let foo: bool = Value::String("1".into()).convert().unwrap(); assert!(foo); } #[test] fn should_convert_bytes() { - let foo: Vec = RedisValue::Bytes("foo".as_bytes().to_vec().into()).convert().unwrap(); + let foo: Vec = Value::Bytes("foo".as_bytes().to_vec().into()).convert().unwrap(); assert_eq!(foo, "foo".as_bytes().to_vec()); - let foo: Vec = RedisValue::String("foo".into()).convert().unwrap(); + let foo: Vec = Value::String("foo".into()).convert().unwrap(); assert_eq!(foo, "foo".as_bytes().to_vec()); - let foo: Vec = RedisValue::Array(vec![102.into(), 111.into(), 111.into()]) + let foo: Vec = Value::Array(vec![102.into(), 111.into(), 111.into()]) .convert() .unwrap(); assert_eq!(foo, 
"foo".as_bytes().to_vec()); @@ -890,13 +879,13 @@ mod tests { #[test] fn should_convert_arrays() { - let foo: Vec = RedisValue::Array(vec!["a".into(), "b".into()]).convert().unwrap(); + let foo: Vec = Value::Array(vec!["a".into(), "b".into()]).convert().unwrap(); assert_eq!(foo, vec!["a".to_owned(), "b".to_owned()]); } #[test] fn should_convert_hash_maps() { - let foo: HashMap = RedisValue::Array(vec!["a".into(), 1.into(), "b".into(), 2.into()]) + let foo: HashMap = Value::Array(vec!["a".into(), 1.into(), "b".into(), 2.into()]) .convert() .unwrap(); @@ -908,7 +897,7 @@ mod tests { #[test] fn should_convert_hash_sets() { - let foo: HashSet = RedisValue::Array(vec!["a".into(), "b".into()]).convert().unwrap(); + let foo: HashSet = Value::Array(vec!["a".into(), "b".into()]).convert().unwrap(); let mut expected = HashSet::new(); expected.insert("a".to_owned()); @@ -918,7 +907,7 @@ mod tests { #[test] fn should_convert_btree_maps() { - let foo: BTreeMap = RedisValue::Array(vec!["a".into(), 1.into(), "b".into(), 2.into()]) + let foo: BTreeMap = Value::Array(vec!["a".into(), 1.into(), "b".into(), 2.into()]) .convert() .unwrap(); @@ -930,7 +919,7 @@ mod tests { #[test] fn should_convert_btree_sets() { - let foo: BTreeSet = RedisValue::Array(vec!["a".into(), "b".into()]).convert().unwrap(); + let foo: BTreeSet = Value::Array(vec!["a".into(), "b".into()]).convert().unwrap(); let mut expected = BTreeSet::new(); expected.insert("a".to_owned()); @@ -940,13 +929,13 @@ mod tests { #[test] fn should_convert_tuples() { - let foo: (String, i64) = RedisValue::Array(vec!["a".into(), 1.into()]).convert().unwrap(); + let foo: (String, i64) = Value::Array(vec!["a".into(), 1.into()]).convert().unwrap(); assert_eq!(foo, ("a".to_owned(), 1)); } #[test] fn should_convert_array_tuples() { - let foo: Vec<(String, i64)> = RedisValue::Array(vec!["a".into(), 1.into(), "b".into(), 2.into()]) + let foo: Vec<(String, i64)> = Value::Array(vec!["a".into(), 1.into(), "b".into(), 2.into()]) .convert() 
.unwrap(); assert_eq!(foo, vec![("a".to_owned(), 1), ("b".to_owned(), 2)]); @@ -954,39 +943,34 @@ mod tests { #[test] fn should_handle_single_element_vector_to_scalar() { - assert!(RedisValue::Array(vec![]).convert::().is_err()); - assert_eq!( - RedisValue::Array(vec!["foo".into()]).convert::(), - Ok("foo".into()) - ); - assert!(RedisValue::Array(vec!["foo".into(), "bar".into()]) + assert!(Value::Array(vec![]).convert::().is_err()); + assert_eq!(Value::Array(vec!["foo".into()]).convert::(), Ok("foo".into())); + assert!(Value::Array(vec!["foo".into(), "bar".into()]) .convert::() .is_err()); - assert_eq!(RedisValue::Array(vec![]).convert::>(), Ok(None)); + assert_eq!(Value::Array(vec![]).convert::>(), Ok(None)); assert_eq!( - RedisValue::Array(vec!["foo".into()]).convert::>(), + Value::Array(vec!["foo".into()]).convert::>(), Ok(Some("foo".into())) ); - assert!(RedisValue::Array(vec!["foo".into(), "bar".into()]) + assert!(Value::Array(vec!["foo".into(), "bar".into()]) .convert::>() .is_err()); } #[test] fn should_convert_null_to_empty_array() { - assert_eq!(Vec::::new(), RedisValue::Null.convert::>().unwrap()); - assert_eq!(Vec::::new(), RedisValue::Null.convert::>().unwrap()); + assert_eq!(Vec::::new(), Value::Null.convert::>().unwrap()); + assert_eq!(Vec::::new(), Value::Null.convert::>().unwrap()); } #[test] fn should_convert_to_fixed_arrays() { - let foo: [i64; 2] = RedisValue::Array(vec![1.into(), 2.into()]).convert().unwrap(); + let foo: [i64; 2] = Value::Array(vec![1.into(), 2.into()]).convert().unwrap(); assert_eq!(foo, [1, 2]); - assert!(RedisValue::Array(vec![1.into(), 2.into()]) - .convert::<[i64; 3]>() - .is_err()); - assert!(RedisValue::Array(vec![]).convert::<[i64; 3]>().is_err()); + assert!(Value::Array(vec![1.into(), 2.into()]).convert::<[i64; 3]>().is_err()); + assert!(Value::Array(vec![]).convert::<[i64; 3]>().is_err()); } } diff --git a/src/monitor/mod.rs b/src/monitor/mod.rs index 4b1adf38..29ede30b 100644 --- a/src/monitor/mod.rs +++ 
b/src/monitor/mod.rs @@ -1,6 +1,6 @@ use crate::{ - error::RedisError, - types::{RedisConfig, RedisValue}, + error::Error, + types::{config::Config, Value}, utils as client_utils, }; use futures::Stream; @@ -13,11 +13,11 @@ mod utils; /// /// Formatting with the [Display](https://doc.rust-lang.org/std/fmt/trait.Display.html) trait will print the same output as `redis-cli`. #[derive(Clone, Debug)] -pub struct Command { +pub struct MonitorCommand { /// The command run by the server. pub command: String, /// Arguments passed to the command. - pub args: Vec, + pub args: Vec, /// When the command was run on the server. pub timestamp: f64, /// The database against which the command was run. @@ -26,7 +26,7 @@ pub struct Command { pub client: String, } -impl PartialEq for Command { +impl PartialEq for MonitorCommand { fn eq(&self, other: &Self) -> bool { client_utils::f64_eq(self.timestamp, other.timestamp) && self.client == other.client @@ -36,9 +36,9 @@ impl PartialEq for Command { } } -impl Eq for Command {} +impl Eq for MonitorCommand {} -impl fmt::Display for Command { +impl fmt::Display for MonitorCommand { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, @@ -57,6 +57,6 @@ impl fmt::Display for Command { /// Run the [MONITOR](https://redis.io/commands/monitor) command against the provided server. /// /// Currently only centralized configurations are supported. 
-pub async fn run(config: RedisConfig) -> Result, RedisError> { +pub async fn run(config: Config) -> Result, Error> { utils::start(config).await } diff --git a/src/monitor/parser.rs b/src/monitor/parser.rs index f7021527..f73cd7cb 100644 --- a/src/monitor/parser.rs +++ b/src/monitor/parser.rs @@ -1,4 +1,4 @@ -use crate::{modules::inner::RedisClientInner, monitor::Command, runtime::RefCount, types::RedisValue}; +use crate::{modules::inner::ClientInner, monitor::MonitorCommand, runtime::RefCount, types::Value}; use nom::{ bytes::complete::{escaped as nom_escaped, tag as nom_tag, take as nom_take, take_until as nom_take_until}, character::complete::none_of as nom_none_of, @@ -27,12 +27,12 @@ fn to_u8(s: &str) -> Result> { .map_err(|e| RedisParseError::new_custom("to_u8", format!("{:?}", e))) } -fn to_redis_value(s: &[u8]) -> Result> { +fn to_redis_value(s: &[u8]) -> Result> { // TODO make this smarter in the future if let Ok(value) = str::from_utf8(s) { - Ok(RedisValue::String(value.into())) + Ok(Value::String(value.into())) } else { - Ok(RedisValue::Bytes(s.to_vec().into())) + Ok(Value::Bytes(s.to_vec().into())) } } @@ -78,7 +78,7 @@ fn d_parse_command(input: &[u8]) -> IResult<&[u8], String, RedisParseError<&[u8] Ok((input, command.to_owned())) } -fn d_parse_arg(input: &[u8]) -> IResult<&[u8], RedisValue, RedisParseError<&[u8]>> { +fn d_parse_arg(input: &[u8]) -> IResult<&[u8], Value, RedisParseError<&[u8]>> { let escaped_parser = nom_escaped(nom_none_of("\\\""), '\\', nom_tag(QUOTE)); nom_map_res( nom_terminated( @@ -89,18 +89,18 @@ fn d_parse_arg(input: &[u8]) -> IResult<&[u8], RedisValue, RedisParseError<&[u8] )(input) } -fn d_parse_args(input: &[u8]) -> IResult<&[u8], Vec, RedisParseError<&[u8]>> { +fn d_parse_args(input: &[u8]) -> IResult<&[u8], Vec, RedisParseError<&[u8]>> { nom_many0(d_parse_arg)(input) } -fn d_parse_frame(input: &[u8]) -> Result> { +fn d_parse_frame(input: &[u8]) -> Result> { let (input, timestamp) = d_parse_timestamp(input)?; let (input, db) 
= d_parse_db(input)?; let (input, client) = d_parse_client(input)?; let (input, command) = d_parse_command(input)?; let (_, args) = d_parse_args(input)?; - Ok(Command { + Ok(MonitorCommand { timestamp, db, client, @@ -110,7 +110,7 @@ fn d_parse_frame(input: &[u8]) -> Result> { } #[cfg(feature = "network-logs")] -fn log_frame(inner: &RefCount, frame: &[u8]) { +fn log_frame(inner: &RefCount, frame: &[u8]) { if let Ok(s) = str::from_utf8(frame) { _trace!(inner, "Monitor frame: {}", s); } else { @@ -119,9 +119,9 @@ fn log_frame(inner: &RefCount, frame: &[u8]) { } #[cfg(not(feature = "network-logs"))] -fn log_frame(_: &RefCount, _: &[u8]) {} +fn log_frame(_: &RefCount, _: &[u8]) {} -pub fn parse(inner: &RefCount, frame: Resp3Frame) -> Option { +pub fn parse(inner: &RefCount, frame: Resp3Frame) -> Option { let frame_bytes = match frame { Resp3Frame::SimpleString { ref data, .. } => data, Resp3Frame::BlobString { ref data, .. } => data, @@ -138,12 +138,12 @@ pub fn parse(inner: &RefCount, frame: Resp3Frame) -> Option, - frame: Result, -) -> Option { + inner: &RefCount, + frame: Result, +) -> Option { let frame = match frame { Ok(frame) => frame.into_resp3(), Err(e) => { @@ -33,7 +31,7 @@ async fn handle_monitor_frame( return None; }, }; - let frame_size = frame.encode_len(); + let frame_size = frame.encode_len(true); if frame_size >= inner.with_perf_config(|c| c.blocking_encode_threshold) { // since this isn't called from the Encoder/Decoder trait we can use spawn_blocking here @@ -55,9 +53,9 @@ async fn handle_monitor_frame( #[cfg(any(not(feature = "blocking-encoding"), feature = "glommio"))] async fn handle_monitor_frame( - inner: &RefCount, - frame: Result, -) -> Option { + inner: &RefCount, + frame: Result, +) -> Option { let frame = match frame { Ok(frame) => frame.into_resp3(), Err(e) => { @@ -70,12 +68,12 @@ async fn handle_monitor_frame( } async fn send_monitor_command( - inner: &RefCount, - mut connection: RedisTransport, -) -> Result { + inner: &RefCount, + mut 
connection: ExclusiveConnection, +) -> Result { _debug!(inner, "Sending MONITOR command."); - let command = RedisCommand::new(RedisCommandKind::Monitor, vec![]); + let command = Command::new(CommandKind::Monitor, vec![]); let frame = connection.request_response(command, inner.is_resp3()).await?; _trace!(inner, "Recv MONITOR response: {:?}", frame); @@ -85,15 +83,15 @@ async fn send_monitor_command( } async fn forward_results( - inner: &RefCount, - tx: UnboundedSender, - mut framed: Framed, + inner: &RefCount, + tx: Sender, + mut framed: Peekable>, ) where T: AsyncRead + AsyncWrite + Unpin + 'static, { while let Some(frame) = framed.next().await { if let Some(command) = handle_monitor_frame(inner, frame).await { - if let Err(_) = tx.send(command) { + if let Err(_) = tx.try_send(command) { _warn!(inner, "Stopping monitor stream."); return; } @@ -103,16 +101,12 @@ async fn forward_results( } } -async fn process_stream( - inner: &RefCount, - tx: UnboundedSender, - connection: RedisTransport, -) { +async fn process_stream(inner: &RefCount, tx: Sender, connection: ExclusiveConnection) { _debug!(inner, "Starting monitor stream processing..."); match connection.transport { ConnectionKind::Tcp(framed) => forward_results(inner, tx, framed).await, - #[cfg(feature = "enable-rustls")] + #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] ConnectionKind::Rustls(framed) => forward_results(inner, tx, framed).await, #[cfg(feature = "enable-native-tls")] ConnectionKind::NativeTls(framed) => forward_results(inner, tx, framed).await, @@ -123,23 +117,14 @@ async fn process_stream( _warn!(inner, "Stopping monitor stream."); } -pub async fn start(config: RedisConfig) -> Result, RedisError> { - let perf = PerformanceConfig { - auto_pipeline: false, - ..Default::default() - }; +pub async fn start(config: Config) -> Result, Error> { let connection = ConnectionConfig::default(); let server = match config.server { ServerConfig::Centralized { ref server } => server.clone(), 
- _ => { - return Err(RedisError::new( - RedisErrorKind::Config, - "Expected centralized server config.", - )) - }, + _ => return Err(Error::new(ErrorKind::Config, "Expected centralized server config.")), }; - let inner = RedisClientInner::new(config, perf, connection, None); + let inner = ClientInner::new(config, PerformanceConfig::default(), connection, None); let mut connection = connection::create(&inner, &server, None).await?; connection.setup(&inner, None).await?; let connection = send_monitor_command(&inner, connection).await?; @@ -147,15 +132,10 @@ pub async fn start(config: RedisConfig) -> Result, R // there isn't really a mechanism to surface backpressure to the server for the MONITOR stream, so we use a // background task with a channel to process the frames so that the server can keep sending data even if the // stream consumer slows down processing the frames. - let (tx, rx) = unbounded_channel(); - #[cfg(feature = "glommio")] - let tx = tx.into(); + let (tx, rx) = channel(0); spawn(async move { process_stream(&inner, tx, connection).await; }); - #[cfg(feature = "glommio")] - return Ok(crate::runtime::rx_stream(rx)); - #[cfg(not(feature = "glommio"))] - return Ok(UnboundedReceiverStream::new(rx)); + Ok(rx.into_stream()) } diff --git a/src/protocol/cluster.rs b/src/protocol/cluster.rs index 6fcf2679..3fec4755 100644 --- a/src/protocol/cluster.rs +++ b/src/protocol/cluster.rs @@ -1,9 +1,9 @@ use crate::{ - error::{RedisError, RedisErrorKind}, - modules::inner::RedisClientInner, + error::{Error, ErrorKind}, + modules::inner::ClientInner, protocol::types::{Server, SlotRange}, runtime::RefCount, - types::RedisValue, + types::Value, utils, }; use bytes_utils::Str; @@ -16,20 +16,17 @@ use std::{collections::HashMap, net::IpAddr, str::FromStr}; ))] use crate::protocol::tls::TlsHostMapping; -fn parse_as_u16(value: RedisValue) -> Result { +fn parse_as_u16(value: Value) -> Result { match value { - RedisValue::Integer(i) => { + Value::Integer(i) => { if i < 0 || i 
> u16::MAX as i64 { - Err(RedisError::new(RedisErrorKind::Parse, "Invalid cluster slot integer.")) + Err(Error::new(ErrorKind::Parse, "Invalid cluster slot integer.")) } else { Ok(i as u16) } }, - RedisValue::String(s) => s.parse::().map_err(|e| e.into()), - _ => Err(RedisError::new( - RedisErrorKind::Parse, - "Could not parse value as cluster slot.", - )), + Value::String(s) => s.parse::().map_err(|e| e.into()), + _ => Err(Error::new(ErrorKind::Parse, "Could not parse value as cluster slot.")), } } @@ -58,12 +55,9 @@ fn check_metadata_hostname(data: &HashMap) -> Option<&Str> { /// The `default_host` is the host that returned the `CLUSTER SLOTS` response. /// /// -fn parse_cluster_slot_hostname(server: &[RedisValue], default_host: &Str) -> Result { +fn parse_cluster_slot_hostname(server: &[Value], default_host: &Str) -> Result { if server.is_empty() { - return Err(RedisError::new( - RedisErrorKind::Protocol, - "Invalid CLUSTER SLOTS server block.", - )); + return Err(Error::new(ErrorKind::Protocol, "Invalid CLUSTER SLOTS server block.")); } let should_parse_metadata = server.len() >= 4 && !server[3].is_null() && server[3].array_len().unwrap_or(0) > 0; @@ -80,8 +74,8 @@ fn parse_cluster_slot_hostname(server: &[RedisValue], default_host: &Str) -> Res let preferred_host = match server[0].clone().convert::() { Ok(host) => host, Err(_) => { - return Err(RedisError::new( - RedisErrorKind::Protocol, + return Err(Error::new( + ErrorKind::Protocol, "Invalid CLUSTER SLOTS server block hostname.", )) }, @@ -98,7 +92,7 @@ fn parse_cluster_slot_hostname(server: &[RedisValue], default_host: &Str) -> Res } /// Read the node block with format `|null, , , [metadata]` -fn parse_node_block(data: &[RedisValue], default_host: &Str) -> Option<(Str, u16, Str, Str)> { +fn parse_node_block(data: &[Value], default_host: &Str) -> Option<(Str, u16, Str, Str)> { if data.len() < 3 { return None; } @@ -119,11 +113,11 @@ fn parse_node_block(data: &[RedisValue], default_host: &Str) -> Option<(Str, 
u16 /// Parse the optional trailing replica nodes in each `CLUSTER SLOTS` slot range block. #[cfg(feature = "replicas")] -fn parse_cluster_slot_replica_nodes(slot_range: Vec, default_host: &Str) -> Vec { +fn parse_cluster_slot_replica_nodes(slot_range: Vec, default_host: &Str) -> Vec { slot_range .into_iter() .filter_map(|value| { - let server_block: Vec = match value.convert() { + let server_block: Vec = match value.convert() { Ok(v) => v, Err(_) => { warn!("Skip replica CLUSTER SLOTS block from {}", default_host); @@ -154,12 +148,9 @@ fn parse_cluster_slot_replica_nodes(slot_range: Vec, default_host: & } /// Parse the cluster slot range and associated server blocks. -fn parse_cluster_slot_nodes(mut slot_range: Vec, default_host: &Str) -> Result { +fn parse_cluster_slot_nodes(mut slot_range: Vec, default_host: &Str) -> Result { if slot_range.len() < 3 { - return Err(RedisError::new( - RedisErrorKind::Protocol, - "Invalid CLUSTER SLOTS response.", - )); + return Err(Error::new(ErrorKind::Protocol, "Invalid CLUSTER SLOTS response.")); } slot_range.reverse(); // length checked above @@ -168,15 +159,12 @@ fn parse_cluster_slot_nodes(mut slot_range: Vec, default_host: &Str) // the third value is the primary node, following values are optional replica nodes // length checked above. 
format is `|null, , , [metadata]` - let server_block: Vec = slot_range.pop().unwrap().convert()?; + let server_block: Vec = slot_range.pop().unwrap().convert()?; let (host, port, id) = match parse_node_block(&server_block, default_host) { Some((h, p, _, i)) => (h, p, i), None => { trace!("Failed to parse CLUSTER SLOTS response: {:?}", server_block); - return Err(RedisError::new( - RedisErrorKind::Cluster, - "Invalid CLUSTER SLOTS response.", - )); + return Err(Error::new(ErrorKind::Cluster, "Invalid CLUSTER SLOTS response.")); }, }; @@ -201,8 +189,8 @@ fn parse_cluster_slot_nodes(mut slot_range: Vec, default_host: &Str) /// Parse the entire CLUSTER SLOTS response with the provided `default_host` of the connection used to send the /// command. -pub fn parse_cluster_slots(frame: RedisValue, default_host: &Str) -> Result, RedisError> { - let slot_ranges: Vec> = frame.convert()?; +pub fn parse_cluster_slots(frame: Value, default_host: &Str) -> Result, Error> { + let slot_ranges: Vec> = frame.convert()?; let mut out: Vec = Vec::with_capacity(slot_ranges.len()); for slot_range in slot_ranges.into_iter() { @@ -235,11 +223,7 @@ fn replace_tls_server_names(policy: &TlsHostMapping, ranges: &mut [SlotRange], d feature = "enable-native-tls", feature = "enable-rustls-ring" ))] -pub fn modify_cluster_slot_hostnames( - inner: &RefCount, - ranges: &mut [SlotRange], - default_host: &Str, -) { +pub fn modify_cluster_slot_hostnames(inner: &RefCount, ranges: &mut [SlotRange], default_host: &Str) { let policy = match inner.config.tls { Some(ref config) => &config.hostnames, None => { @@ -260,7 +244,7 @@ pub fn modify_cluster_slot_hostnames( feature = "enable-native-tls", feature = "enable-rustls-ring" )))] -pub fn modify_cluster_slot_hostnames(inner: &RefCount, _: &mut Vec, _: &Str) { +pub fn modify_cluster_slot_hostnames(inner: &RefCount, _: &mut Vec, _: &Str) { _trace!(inner, "Skip modifying TLS hostnames.") } @@ -294,104 +278,104 @@ mod tests { } } - fn 
fake_cluster_slots_without_metadata() -> RedisValue { - let first_slot_range = RedisValue::Array(vec![ + fn fake_cluster_slots_without_metadata() -> Value { + let first_slot_range = Value::Array(vec![ 0.into(), 5460.into(), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30001.into(), "09dbe9720cda62f7865eabc5fd8857c5d2678366".into(), ]), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30004.into(), "821d8ca00d7ccf931ed3ffc7e3db0599d2271abf".into(), ]), ]); - let second_slot_range = RedisValue::Array(vec![ + let second_slot_range = Value::Array(vec![ 5461.into(), 10922.into(), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30002.into(), "c9d93d9f2c0c524ff34cc11838c2003d8c29e013".into(), ]), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30005.into(), "faadb3eb99009de4ab72ad6b6ed87634c7ee410f".into(), ]), ]); - let third_slot_range = RedisValue::Array(vec![ + let third_slot_range = Value::Array(vec![ 10923.into(), 16383.into(), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30003.into(), "044ec91f325b7595e76dbcb18cc688b6a5b434a1".into(), ]), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30006.into(), "58e6e48d41228013e5d9c1c37c5060693925e97e".into(), ]), ]); - RedisValue::Array(vec![first_slot_range, second_slot_range, third_slot_range]) + Value::Array(vec![first_slot_range, second_slot_range, third_slot_range]) } - fn fake_cluster_slots_with_metadata() -> RedisValue { - let first_slot_range = RedisValue::Array(vec![ + fn fake_cluster_slots_with_metadata() -> Value { + let first_slot_range = Value::Array(vec![ 0.into(), 5460.into(), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30001.into(), "09dbe9720cda62f7865eabc5fd8857c5d2678366".into(), - RedisValue::Array(vec!["hostname".into(), "host-1.redis.example.com".into()]), + Value::Array(vec!["hostname".into(), "host-1.redis.example.com".into()]), ]), - RedisValue::Array(vec![ + 
Value::Array(vec![ "127.0.0.1".into(), 30004.into(), "821d8ca00d7ccf931ed3ffc7e3db0599d2271abf".into(), - RedisValue::Array(vec!["hostname".into(), "host-2.redis.example.com".into()]), + Value::Array(vec!["hostname".into(), "host-2.redis.example.com".into()]), ]), ]); - let second_slot_range = RedisValue::Array(vec![ + let second_slot_range = Value::Array(vec![ 5461.into(), 10922.into(), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30002.into(), "c9d93d9f2c0c524ff34cc11838c2003d8c29e013".into(), - RedisValue::Array(vec!["hostname".into(), "host-3.redis.example.com".into()]), + Value::Array(vec!["hostname".into(), "host-3.redis.example.com".into()]), ]), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30005.into(), "faadb3eb99009de4ab72ad6b6ed87634c7ee410f".into(), - RedisValue::Array(vec!["hostname".into(), "host-4.redis.example.com".into()]), + Value::Array(vec!["hostname".into(), "host-4.redis.example.com".into()]), ]), ]); - let third_slot_range = RedisValue::Array(vec![ + let third_slot_range = Value::Array(vec![ 10923.into(), 16383.into(), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30003.into(), "044ec91f325b7595e76dbcb18cc688b6a5b434a1".into(), - RedisValue::Array(vec!["hostname".into(), "host-5.redis.example.com".into()]), + Value::Array(vec!["hostname".into(), "host-5.redis.example.com".into()]), ]), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30006.into(), "58e6e48d41228013e5d9c1c37c5060693925e97e".into(), - RedisValue::Array(vec!["hostname".into(), "host-6.redis.example.com".into()]), + Value::Array(vec!["hostname".into(), "host-6.redis.example.com".into()]), ]), ]); - RedisValue::Array(vec![first_slot_range, second_slot_range, third_slot_range]) + Value::Array(vec![first_slot_range, second_slot_range, third_slot_range]) } #[test] @@ -645,55 +629,55 @@ mod tests { #[test] fn should_parse_cluster_slots_example_empty_metadata() { - let first_slot_range = 
RedisValue::Array(vec![ + let first_slot_range = Value::Array(vec![ 0.into(), 5460.into(), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30001.into(), "09dbe9720cda62f7865eabc5fd8857c5d2678366".into(), - RedisValue::Array(vec![]), + Value::Array(vec![]), ]), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30004.into(), "821d8ca00d7ccf931ed3ffc7e3db0599d2271abf".into(), - RedisValue::Array(vec![]), + Value::Array(vec![]), ]), ]); - let second_slot_range = RedisValue::Array(vec![ + let second_slot_range = Value::Array(vec![ 5461.into(), 10922.into(), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30002.into(), "c9d93d9f2c0c524ff34cc11838c2003d8c29e013".into(), - RedisValue::Array(vec![]), + Value::Array(vec![]), ]), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30005.into(), "faadb3eb99009de4ab72ad6b6ed87634c7ee410f".into(), - RedisValue::Array(vec![]), + Value::Array(vec![]), ]), ]); - let third_slot_range = RedisValue::Array(vec![ + let third_slot_range = Value::Array(vec![ 10923.into(), 16383.into(), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30003.into(), "044ec91f325b7595e76dbcb18cc688b6a5b434a1".into(), - RedisValue::Array(vec![]), + Value::Array(vec![]), ]), - RedisValue::Array(vec![ + Value::Array(vec![ "127.0.0.1".into(), 30006.into(), "58e6e48d41228013e5d9c1c37c5060693925e97e".into(), - RedisValue::Array(vec![]), + Value::Array(vec![]), ]), ]); - let input = RedisValue::Array(vec![first_slot_range, second_slot_range, third_slot_range]); + let input = Value::Array(vec![first_slot_range, second_slot_range, third_slot_range]); let actual = parse_cluster_slots(input, &Str::from("bad-host")).expect("Failed to parse input"); let expected = vec![ @@ -781,55 +765,55 @@ mod tests { #[test] fn should_parse_cluster_slots_example_null_hostname() { - let first_slot_range = RedisValue::Array(vec![ + let first_slot_range = Value::Array(vec![ 0.into(), 5460.into(), - 
RedisValue::Array(vec![ - RedisValue::Null, + Value::Array(vec![ + Value::Null, 30001.into(), "09dbe9720cda62f7865eabc5fd8857c5d2678366".into(), - RedisValue::Array(vec![]), + Value::Array(vec![]), ]), - RedisValue::Array(vec![ - RedisValue::Null, + Value::Array(vec![ + Value::Null, 30004.into(), "821d8ca00d7ccf931ed3ffc7e3db0599d2271abf".into(), - RedisValue::Array(vec![]), + Value::Array(vec![]), ]), ]); - let second_slot_range = RedisValue::Array(vec![ + let second_slot_range = Value::Array(vec![ 5461.into(), 10922.into(), - RedisValue::Array(vec![ - RedisValue::Null, + Value::Array(vec![ + Value::Null, 30002.into(), "c9d93d9f2c0c524ff34cc11838c2003d8c29e013".into(), - RedisValue::Array(vec![]), + Value::Array(vec![]), ]), - RedisValue::Array(vec![ - RedisValue::Null, + Value::Array(vec![ + Value::Null, 30005.into(), "faadb3eb99009de4ab72ad6b6ed87634c7ee410f".into(), - RedisValue::Array(vec![]), + Value::Array(vec![]), ]), ]); - let third_slot_range = RedisValue::Array(vec![ + let third_slot_range = Value::Array(vec![ 10923.into(), 16383.into(), - RedisValue::Array(vec![ - RedisValue::Null, + Value::Array(vec![ + Value::Null, 30003.into(), "044ec91f325b7595e76dbcb18cc688b6a5b434a1".into(), - RedisValue::Array(vec![]), + Value::Array(vec![]), ]), - RedisValue::Array(vec![ - RedisValue::Null, + Value::Array(vec![ + Value::Null, 30006.into(), "58e6e48d41228013e5d9c1c37c5060693925e97e".into(), - RedisValue::Array(vec![]), + Value::Array(vec![]), ]), ]); - let input = RedisValue::Array(vec![first_slot_range, second_slot_range, third_slot_range]); + let input = Value::Array(vec![first_slot_range, second_slot_range, third_slot_range]); let actual = parse_cluster_slots(input, &Str::from("fake-host")).expect("Failed to parse input"); let expected = vec![ @@ -917,55 +901,55 @@ mod tests { #[test] fn should_parse_cluster_slots_example_empty_hostname() { - let first_slot_range = RedisValue::Array(vec![ + let first_slot_range = Value::Array(vec![ 0.into(), 5460.into(), - 
RedisValue::Array(vec![ - RedisValue::Null, + Value::Array(vec![ + Value::Null, 30001.into(), "09dbe9720cda62f7865eabc5fd8857c5d2678366".into(), - RedisValue::Array(vec!["hostname".into(), "".into()]), + Value::Array(vec!["hostname".into(), "".into()]), ]), - RedisValue::Array(vec![ - RedisValue::Null, + Value::Array(vec![ + Value::Null, 30004.into(), "821d8ca00d7ccf931ed3ffc7e3db0599d2271abf".into(), - RedisValue::Array(vec!["hostname".into(), "".into()]), + Value::Array(vec!["hostname".into(), "".into()]), ]), ]); - let second_slot_range = RedisValue::Array(vec![ + let second_slot_range = Value::Array(vec![ 5461.into(), 10922.into(), - RedisValue::Array(vec![ - RedisValue::Null, + Value::Array(vec![ + Value::Null, 30002.into(), "c9d93d9f2c0c524ff34cc11838c2003d8c29e013".into(), - RedisValue::Array(vec!["hostname".into(), "".into()]), + Value::Array(vec!["hostname".into(), "".into()]), ]), - RedisValue::Array(vec![ - RedisValue::Null, + Value::Array(vec![ + Value::Null, 30005.into(), "faadb3eb99009de4ab72ad6b6ed87634c7ee410f".into(), - RedisValue::Array(vec!["hostname".into(), "".into()]), + Value::Array(vec!["hostname".into(), "".into()]), ]), ]); - let third_slot_range = RedisValue::Array(vec![ + let third_slot_range = Value::Array(vec![ 10923.into(), 16383.into(), - RedisValue::Array(vec![ - RedisValue::Null, + Value::Array(vec![ + Value::Null, 30003.into(), "044ec91f325b7595e76dbcb18cc688b6a5b434a1".into(), - RedisValue::Array(vec!["hostname".into(), "".into()]), + Value::Array(vec!["hostname".into(), "".into()]), ]), - RedisValue::Array(vec![ - RedisValue::Null, + Value::Array(vec![ + Value::Null, 30006.into(), "58e6e48d41228013e5d9c1c37c5060693925e97e".into(), - RedisValue::Array(vec!["hostname".into(), "".into()]), + Value::Array(vec!["hostname".into(), "".into()]), ]), ]); - let input = RedisValue::Array(vec![first_slot_range, second_slot_range, third_slot_range]); + let input = Value::Array(vec![first_slot_range, second_slot_range, third_slot_range]); let 
actual = parse_cluster_slots(input, &Str::from("fake-host")).expect("Failed to parse input"); let expected = vec![ diff --git a/src/protocol/codec.rs b/src/protocol/codec.rs index a69315f7..59597219 100644 --- a/src/protocol/codec.rs +++ b/src/protocol/codec.rs @@ -1,6 +1,6 @@ use crate::{ - error::{RedisError, RedisErrorKind}, - modules::inner::RedisClientInner, + error::{Error, ErrorKind}, + modules::inner::ClientInner, protocol::{ types::{ProtocolFrame, Server}, utils as protocol_utils, @@ -34,12 +34,10 @@ fn log_resp2_frame(_: &str, _: &Resp2Frame, _: bool) {} #[cfg(not(feature = "network-logs"))] fn log_resp3_frame(_: &str, _: &Resp3Frame, _: bool) {} #[cfg(feature = "network-logs")] -pub use crate::protocol::debug::log_resp2_frame; -#[cfg(feature = "network-logs")] -pub use crate::protocol::debug::log_resp3_frame; +pub use crate::protocol::debug::*; #[cfg(feature = "metrics")] -fn sample_stats(codec: &RedisCodec, decode: bool, value: i64) { +fn sample_stats(codec: &Codec, decode: bool, value: i64) { if decode { codec.res_size_stats.write().sample(value); } else { @@ -48,12 +46,11 @@ fn sample_stats(codec: &RedisCodec, decode: bool, value: i64) { } #[cfg(not(feature = "metrics"))] -fn sample_stats(_: &RedisCodec, _: bool, _: i64) {} +fn sample_stats(_: &Codec, _: bool, _: i64) {} -fn resp2_encode_frame(codec: &RedisCodec, item: Resp2Frame, dst: &mut BytesMut) -> Result<(), RedisError> { +fn resp2_encode_frame(codec: &Codec, item: Resp2Frame, dst: &mut BytesMut) -> Result<(), Error> { let offset = dst.len(); - - let res = resp2_encode(dst, &item)?; + let res = resp2_encode(dst, &item, true)?; let len = res.saturating_sub(offset); trace!( @@ -69,7 +66,7 @@ fn resp2_encode_frame(codec: &RedisCodec, item: Resp2Frame, dst: &mut BytesMut) Ok(()) } -fn resp2_decode_frame(codec: &RedisCodec, src: &mut BytesMut) -> Result, RedisError> { +fn resp2_decode_frame(codec: &Codec, src: &mut BytesMut) -> Result, Error> { trace!( "{}: Recv {} bytes from {} (RESP2).", 
codec.name, @@ -91,10 +88,9 @@ fn resp2_decode_frame(codec: &RedisCodec, src: &mut BytesMut) -> Result Result<(), RedisError> { +fn resp3_encode_frame(codec: &Codec, item: Resp3Frame, dst: &mut BytesMut) -> Result<(), Error> { let offset = dst.len(); - - let res = resp3_encode(dst, &item)?; + let res = resp3_encode(dst, &item, true)?; let len = res.saturating_sub(offset); trace!( @@ -110,7 +106,7 @@ fn resp3_encode_frame(codec: &RedisCodec, item: Resp3Frame, dst: &mut BytesMut) Ok(()) } -fn resp3_decode_frame(codec: &mut RedisCodec, src: &mut BytesMut) -> Result, RedisError> { +fn resp3_decode_frame(codec: &mut Codec, src: &mut BytesMut) -> Result, Error> { trace!( "{}: Recv {} bytes from {} (RESP3).", codec.name, @@ -125,8 +121,8 @@ fn resp3_decode_frame(codec: &mut RedisCodec, src: &mut BytesMut) -> Result Result Result, RedisError> { +fn resp2_decode_with_fallback(codec: &mut Codec, src: &mut BytesMut) -> Result, Error> { let resp2_result = resp2_decode_frame(codec, src).map(|f| f.map(|f| f.into())); if resp2_result.is_err() { let resp3_result = resp3_decode_frame(codec, src).map(|f| f.map(|f| f.into())); @@ -189,7 +182,7 @@ fn resp2_decode_with_fallback( } } -pub struct RedisCodec { +pub struct Codec { pub name: Str, pub server: Server, pub resp3: RefCount, @@ -200,9 +193,9 @@ pub struct RedisCodec { pub res_size_stats: RefCount>, } -impl RedisCodec { - pub fn new(inner: &RefCount, server: &Server) -> Self { - RedisCodec { +impl Codec { + pub fn new(inner: &RefCount, server: &Server) -> Self { + Codec { server: server.clone(), name: inner.id.clone(), resp3: inner.shared_resp3(), @@ -219,8 +212,8 @@ impl RedisCodec { } } -impl Encoder for RedisCodec { - type Error = RedisError; +impl Encoder for Codec { + type Error = Error; fn encode(&mut self, item: ProtocolFrame, dst: &mut BytesMut) -> Result<(), Self::Error> { match item { @@ -230,8 +223,8 @@ impl Encoder for RedisCodec { } } -impl Decoder for RedisCodec { - type Error = RedisError; +impl Decoder for Codec { 
+ type Error = Error; type Item = ProtocolFrame; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { diff --git a/src/protocol/command.rs b/src/protocol/command.rs index effdac84..3bfcbf40 100644 --- a/src/protocol/command.rs +++ b/src/protocol/command.rs @@ -1,16 +1,16 @@ use crate::{ - error::{RedisError, RedisErrorKind}, + error::{Error, ErrorKind}, interfaces::Resp3Frame, - modules::inner::RedisClientInner, + modules::inner::ClientInner, protocol::{ hashers::ClusterHash, responders::ResponseKind, types::{ProtocolFrame, Server}, utils as protocol_utils, }, - runtime::{oneshot_channel, AtomicBool, Mutex, OneshotReceiver, OneshotSender, RefCount}, + runtime::{AtomicBool, OneshotSender, RefCount}, trace, - types::{CustomCommand, RedisValue}, + types::{CustomCommand, Value}, utils as client_utils, utils, }; @@ -39,47 +39,8 @@ pub fn command_counter() -> usize { .saturating_add(1) } -/// A command interface for communication between connection reader tasks and the router. -/// -/// Use of this interface assumes that a command was **not** pipelined. The reader task may instead -/// choose to communicate with the router via the shared command queue if no channel exists on -/// which to send this command. -#[derive(Debug)] -pub enum RouterResponse { - /// Continue with the next command. - Continue, - /// Retry the command immediately against the provided server, but with an `ASKING` prefix. - /// - /// Typically used with transactions to retry the entire transaction against a different node. - /// - /// Reader tasks will attempt to use the router channel first when handling cluster errors, but - /// may fall back to communication via the command channel in the context of pipelined commands. - Ask((u16, Server, RedisCommand)), - /// Retry the command immediately against the provided server, updating the cached routing table first. 
- /// - /// Reader tasks will attempt to use the router channel first when handling cluster errors, but - /// may fall back to communication via the command channel in the context of pipelined commands. - Moved((u16, Server, RedisCommand)), - /// Indicate to the router that the provided transaction command failed with the associated error. - /// - /// The router is responsible for responding to the caller with the error, if needed. Transaction commands are - /// never pipelined. - TransactionError((RedisError, RedisCommand)), - /// Indicates to the router that the transaction finished with the associated result. - TransactionResult(Resp3Frame), - /// Indicates that the connection closed while the command was in-flight. - /// - /// This is only used for non-pipelined commands where the router task is blocked on a response before - /// checking the next command. - ConnectionClosed((RedisError, RedisCommand)), -} - /// A channel for communication between connection reader tasks and futures returned to the caller. -pub type ResponseSender = OneshotSender>; -/// A sender channel for communication between connection reader tasks and the router. -pub type RouterSender = OneshotSender; -/// A receiver channel for communication between connection reader tasks and the router. 
-pub type RouterReceiver = OneshotReceiver; +pub type ResponseSender = OneshotSender>; #[derive(Clone, Debug, Eq, PartialEq)] pub enum ClusterErrorKind { @@ -97,23 +58,20 @@ impl fmt::Display for ClusterErrorKind { } impl<'a> TryFrom<&'a str> for ClusterErrorKind { - type Error = RedisError; + type Error = Error; fn try_from(value: &'a str) -> Result { match value { "MOVED" => Ok(ClusterErrorKind::Moved), "ASK" => Ok(ClusterErrorKind::Ask), - _ => Err(RedisError::new( - RedisErrorKind::Protocol, - "Expected MOVED or ASK error.", - )), + _ => Err(Error::new(ErrorKind::Protocol, "Expected MOVED or ASK error.")), } } } // TODO organize these and gate them w/ the appropriate feature flags #[derive(Clone, Eq, PartialEq)] -pub enum RedisCommandKind { +pub enum CommandKind { AclLoad, AclSave, AclList, @@ -505,78 +463,72 @@ pub enum RedisCommandKind { _Custom(CustomCommand), } -impl fmt::Debug for RedisCommandKind { +impl fmt::Debug for CommandKind { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.to_str_debug()) } } -impl RedisCommandKind { +impl CommandKind { pub fn is_scan(&self) -> bool { - matches!(*self, RedisCommandKind::Scan) + matches!(*self, CommandKind::Scan) } pub fn is_hscan(&self) -> bool { - matches!(*self, RedisCommandKind::Hscan) + matches!(*self, CommandKind::Hscan) } pub fn is_sscan(&self) -> bool { - matches!(*self, RedisCommandKind::Sscan) + matches!(*self, CommandKind::Sscan) } pub fn is_zscan(&self) -> bool { - matches!(*self, RedisCommandKind::Zscan) + matches!(*self, CommandKind::Zscan) } pub fn is_hello(&self) -> bool { - matches!( - *self, - RedisCommandKind::_Hello(_) | RedisCommandKind::_HelloAllCluster(_) - ) + matches!(*self, CommandKind::_Hello(_) | CommandKind::_HelloAllCluster(_)) } pub fn is_auth(&self) -> bool { - matches!(*self, RedisCommandKind::Auth) + matches!(*self, CommandKind::Auth) } pub fn is_value_scan(&self) -> bool { - matches!( - *self, - RedisCommandKind::Zscan | RedisCommandKind::Hscan | 
RedisCommandKind::Sscan - ) + matches!(*self, CommandKind::Zscan | CommandKind::Hscan | CommandKind::Sscan) } pub fn is_multi(&self) -> bool { - matches!(*self, RedisCommandKind::Multi) + matches!(*self, CommandKind::Multi) } pub fn is_exec(&self) -> bool { - matches!(*self, RedisCommandKind::Exec) + matches!(*self, CommandKind::Exec) } pub fn is_discard(&self) -> bool { - matches!(*self, RedisCommandKind::Discard) + matches!(*self, CommandKind::Discard) } pub fn ends_transaction(&self) -> bool { - matches!(*self, RedisCommandKind::Exec | RedisCommandKind::Discard) + matches!(*self, CommandKind::Exec | CommandKind::Discard) } pub fn is_mset(&self) -> bool { - matches!(*self, RedisCommandKind::Mset | RedisCommandKind::Msetnx) + matches!(*self, CommandKind::Mset | CommandKind::Msetnx) } pub fn is_custom(&self) -> bool { - matches!(*self, RedisCommandKind::_Custom(_)) + matches!(*self, CommandKind::_Custom(_)) } pub fn closes_connection(&self) -> bool { - matches!(*self, RedisCommandKind::Quit | RedisCommandKind::Shutdown) + matches!(*self, CommandKind::Quit | CommandKind::Shutdown) } pub fn custom_hash_slot(&self) -> Option { match self { - RedisCommandKind::_Custom(ref cmd) => match cmd.cluster_hash { + CommandKind::_Custom(ref cmd) => match cmd.cluster_hash { ClusterHash::Custom(ref val) => Some(*val), _ => None, }, @@ -589,384 +541,384 @@ impl RedisCommandKind { /// Typically used for logging or debugging. 
pub fn to_str_debug(&self) -> &str { match *self { - RedisCommandKind::AclLoad => "ACL LOAD", - RedisCommandKind::AclSave => "ACL SAVE", - RedisCommandKind::AclList => "ACL LIST", - RedisCommandKind::AclUsers => "ACL USERS", - RedisCommandKind::AclGetUser => "ACL GETUSER", - RedisCommandKind::AclSetUser => "ACL SETUSER", - RedisCommandKind::AclDelUser => "ACL DELUSER", - RedisCommandKind::AclCat => "ACL CAT", - RedisCommandKind::AclGenPass => "ACL GENPASS", - RedisCommandKind::AclWhoAmI => "ACL WHOAMI", - RedisCommandKind::AclLog => "ACL LOG", - RedisCommandKind::AclHelp => "ACL HELP", - RedisCommandKind::Append => "APPEND", - RedisCommandKind::Auth => "AUTH", - RedisCommandKind::Asking => "ASKING", - RedisCommandKind::BgreWriteAof => "BGREWRITEAOF", - RedisCommandKind::BgSave => "BGSAVE", - RedisCommandKind::BitCount => "BITCOUNT", - RedisCommandKind::BitField => "BITFIELD", - RedisCommandKind::BitOp => "BITOP", - RedisCommandKind::BitPos => "BITPOS", - RedisCommandKind::BlPop => "BLPOP", - RedisCommandKind::BlMove => "BLMOVE", - RedisCommandKind::BrPop => "BRPOP", - RedisCommandKind::BzmPop => "BZMPOP", - RedisCommandKind::BlmPop => "BLMPOP", - RedisCommandKind::BrPopLPush => "BRPOPLPUSH", - RedisCommandKind::BzPopMin => "BZPOPMIN", - RedisCommandKind::BzPopMax => "BZPOPMAX", - RedisCommandKind::ClientID => "CLIENT ID", - RedisCommandKind::ClientInfo => "CLIENT INFO", - RedisCommandKind::ClientKill => "CLIENT KILL", - RedisCommandKind::ClientList => "CLIENT LIST", - RedisCommandKind::ClientGetName => "CLIENT GETNAME", - RedisCommandKind::ClientPause => "CLIENT PAUSE", - RedisCommandKind::ClientUnpause => "CLIENT UNPAUSE", - RedisCommandKind::ClientUnblock => "CLIENT UNBLOCK", - RedisCommandKind::ClientReply => "CLIENT REPLY", - RedisCommandKind::ClientSetname => "CLIENT SETNAME", - RedisCommandKind::ClientGetRedir => "CLIENT GETREDIR", - RedisCommandKind::ClientTracking => "CLIENT TRACKING", - RedisCommandKind::ClientTrackingInfo => "CLIENT TRACKINGINFO", - 
RedisCommandKind::ClientCaching => "CLIENT CACHING", - RedisCommandKind::ClusterAddSlots => "CLUSTER ADDSLOTS", - RedisCommandKind::ClusterCountFailureReports => "CLUSTER COUNT-FAILURE-REPORTS", - RedisCommandKind::ClusterCountKeysInSlot => "CLUSTER COUNTKEYSINSLOT", - RedisCommandKind::ClusterDelSlots => "CLUSTER DEL SLOTS", - RedisCommandKind::ClusterFailOver => "CLUSTER FAILOVER", - RedisCommandKind::ClusterForget => "CLUSTER FORGET", - RedisCommandKind::ClusterGetKeysInSlot => "CLUSTER GETKEYSINSLOTS", - RedisCommandKind::ClusterInfo => "CLUSTER INFO", - RedisCommandKind::ClusterKeySlot => "CLUSTER KEYSLOT", - RedisCommandKind::ClusterMeet => "CLUSTER MEET", - RedisCommandKind::ClusterNodes => "CLUSTER NODES", - RedisCommandKind::ClusterReplicate => "CLUSTER REPLICATE", - RedisCommandKind::ClusterReset => "CLUSTER RESET", - RedisCommandKind::ClusterSaveConfig => "CLUSTER SAVECONFIG", - RedisCommandKind::ClusterSetConfigEpoch => "CLUSTER SET-CONFIG-EPOCH", - RedisCommandKind::ClusterSetSlot => "CLUSTER SETSLOT", - RedisCommandKind::ClusterReplicas => "CLUSTER REPLICAS", - RedisCommandKind::ClusterSlots => "CLUSTER SLOTS", - RedisCommandKind::ClusterBumpEpoch => "CLUSTER BUMPEPOCH", - RedisCommandKind::ClusterFlushSlots => "CLUSTER FLUSHSLOTS", - RedisCommandKind::ClusterMyID => "CLUSTER MYID", - RedisCommandKind::ConfigGet => "CONFIG GET", - RedisCommandKind::ConfigRewrite => "CONFIG REWRITE", - RedisCommandKind::ConfigSet => "CONFIG SET", - RedisCommandKind::ConfigResetStat => "CONFIG RESETSTAT", - RedisCommandKind::Copy => "COPY", - RedisCommandKind::DBSize => "DBSIZE", - RedisCommandKind::Decr => "DECR", - RedisCommandKind::DecrBy => "DECRBY", - RedisCommandKind::Del => "DEL", - RedisCommandKind::Discard => "DISCARD", - RedisCommandKind::Dump => "DUMP", - RedisCommandKind::Echo => "ECHO", - RedisCommandKind::Eval => "EVAL", - RedisCommandKind::EvalSha => "EVALSHA", - RedisCommandKind::Exec => "EXEC", - RedisCommandKind::Exists => "EXISTS", - 
RedisCommandKind::Expire => "EXPIRE", - RedisCommandKind::ExpireAt => "EXPIREAT", - RedisCommandKind::ExpireTime => "EXPIRETIME", - RedisCommandKind::Failover => "FAILOVER", - RedisCommandKind::FlushAll => "FLUSHALL", - RedisCommandKind::FlushDB => "FLUSHDB", - RedisCommandKind::GeoAdd => "GEOADD", - RedisCommandKind::GeoHash => "GEOHASH", - RedisCommandKind::GeoPos => "GEOPOS", - RedisCommandKind::GeoDist => "GEODIST", - RedisCommandKind::GeoRadius => "GEORADIUS", - RedisCommandKind::GeoRadiusByMember => "GEORADIUSBYMEMBER", - RedisCommandKind::GeoSearch => "GEOSEARCH", - RedisCommandKind::GeoSearchStore => "GEOSEARCHSTORE", - RedisCommandKind::Get => "GET", - RedisCommandKind::GetDel => "GETDEL", - RedisCommandKind::GetBit => "GETBIT", - RedisCommandKind::GetRange => "GETRANGE", - RedisCommandKind::GetSet => "GETSET", - RedisCommandKind::HDel => "HDEL", - RedisCommandKind::_Hello(_) => "HELLO", - RedisCommandKind::HExists => "HEXISTS", - RedisCommandKind::HGet => "HGET", - RedisCommandKind::HGetAll => "HGETALL", - RedisCommandKind::HIncrBy => "HINCRBY", - RedisCommandKind::HIncrByFloat => "HINCRBYFLOAT", - RedisCommandKind::HKeys => "HKEYS", - RedisCommandKind::HLen => "HLEN", - RedisCommandKind::HMGet => "HMGET", - RedisCommandKind::HMSet => "HMSET", - RedisCommandKind::HSet => "HSET", - RedisCommandKind::HSetNx => "HSETNX", - RedisCommandKind::HStrLen => "HSTRLEN", - RedisCommandKind::HRandField => "HRANDFIELD", - RedisCommandKind::HTtl => "HTTL", - RedisCommandKind::HExpire => "HEXPIRE", - RedisCommandKind::HExpireAt => "HEXPIREAT", - RedisCommandKind::HExpireTime => "HEXPIRETIME", - RedisCommandKind::HPersist => "HPERSIST", - RedisCommandKind::HPTtl => "HPTTL", - RedisCommandKind::HPExpire => "HPEXPIRE", - RedisCommandKind::HPExpireAt => "HPEXPIREAT", - RedisCommandKind::HPExpireTime => "HPEXPIRETIME", - RedisCommandKind::HVals => "HVALS", - RedisCommandKind::Incr => "INCR", - RedisCommandKind::IncrBy => "INCRBY", - RedisCommandKind::IncrByFloat => 
"INCRBYFLOAT", - RedisCommandKind::Info => "INFO", - RedisCommandKind::Keys => "KEYS", - RedisCommandKind::LastSave => "LASTSAVE", - RedisCommandKind::LIndex => "LINDEX", - RedisCommandKind::LInsert => "LINSERT", - RedisCommandKind::LLen => "LLEN", - RedisCommandKind::LMove => "LMOVE", - RedisCommandKind::LPop => "LPOP", - RedisCommandKind::LPos => "LPOS", - RedisCommandKind::LPush => "LPUSH", - RedisCommandKind::LPushX => "LPUSHX", - RedisCommandKind::LRange => "LRANGE", - RedisCommandKind::LMPop => "LMPOP", - RedisCommandKind::LRem => "LREM", - RedisCommandKind::LSet => "LSET", - RedisCommandKind::LTrim => "LTRIM", - RedisCommandKind::Lcs => "LCS", - RedisCommandKind::MemoryDoctor => "MEMORY DOCTOR", - RedisCommandKind::MemoryHelp => "MEMORY HELP", - RedisCommandKind::MemoryMallocStats => "MEMORY MALLOC-STATS", - RedisCommandKind::MemoryPurge => "MEMORY PURGE", - RedisCommandKind::MemoryStats => "MEMORY STATS", - RedisCommandKind::MemoryUsage => "MEMORY USAGE", - RedisCommandKind::Mget => "MGET", - RedisCommandKind::Migrate => "MIGRATE", - RedisCommandKind::Monitor => "MONITOR", - RedisCommandKind::Move => "MOVE", - RedisCommandKind::Mset => "MSET", - RedisCommandKind::Msetnx => "MSETNX", - RedisCommandKind::Multi => "MULTI", - RedisCommandKind::Object => "OBJECT", - RedisCommandKind::Persist => "PERSIST", - RedisCommandKind::Pexpire => "PEXPIRE", - RedisCommandKind::Pexpireat => "PEXPIREAT", - RedisCommandKind::PexpireTime => "PEXPIRETIME", - RedisCommandKind::Pfadd => "PFADD", - RedisCommandKind::Pfcount => "PFCOUNT", - RedisCommandKind::Pfmerge => "PFMERGE", - RedisCommandKind::Ping => "PING", - RedisCommandKind::Psetex => "PSETEX", - RedisCommandKind::Psubscribe => "PSUBSCRIBE", - RedisCommandKind::Pttl => "PTTL", - RedisCommandKind::Publish => "PUBLISH", - RedisCommandKind::Punsubscribe => "PUNSUBSCRIBE", - RedisCommandKind::Quit => "QUIT", - RedisCommandKind::Randomkey => "RANDOMKEY", - RedisCommandKind::Readonly => "READONLY", - RedisCommandKind::Readwrite 
=> "READWRITE", - RedisCommandKind::Rename => "RENAME", - RedisCommandKind::Renamenx => "RENAMENX", - RedisCommandKind::Restore => "RESTORE", - RedisCommandKind::Role => "ROLE", - RedisCommandKind::Rpop => "RPOP", - RedisCommandKind::Rpoplpush => "RPOPLPUSH", - RedisCommandKind::Rpush => "RPUSH", - RedisCommandKind::Rpushx => "RPUSHX", - RedisCommandKind::Sadd => "SADD", - RedisCommandKind::Save => "SAVE", - RedisCommandKind::Scard => "SCARD", - RedisCommandKind::Sdiff => "SDIFF", - RedisCommandKind::Sdiffstore => "SDIFFSTORE", - RedisCommandKind::Select => "SELECT", - RedisCommandKind::Sentinel => "SENTINEL", - RedisCommandKind::Set => "SET", - RedisCommandKind::Setbit => "SETBIT", - RedisCommandKind::Setex => "SETEX", - RedisCommandKind::Setnx => "SETNX", - RedisCommandKind::Setrange => "SETRANGE", - RedisCommandKind::Shutdown => "SHUTDOWN", - RedisCommandKind::Sinter => "SINTER", - RedisCommandKind::Sinterstore => "SINTERSTORE", - RedisCommandKind::Sismember => "SISMEMBER", - RedisCommandKind::Replicaof => "REPLICAOF", - RedisCommandKind::Slowlog => "SLOWLOG", - RedisCommandKind::Smembers => "SMEMBERS", - RedisCommandKind::Smismember => "SMISMEMBER", - RedisCommandKind::Smove => "SMOVE", - RedisCommandKind::Sort => "SORT", - RedisCommandKind::SortRo => "SORT_RO", - RedisCommandKind::Spop => "SPOP", - RedisCommandKind::Srandmember => "SRANDMEMBER", - RedisCommandKind::Srem => "SREM", - RedisCommandKind::Strlen => "STRLEN", - RedisCommandKind::Subscribe => "SUBSCRIBE", - RedisCommandKind::Sunion => "SUNION", - RedisCommandKind::Sunionstore => "SUNIONSTORE", - RedisCommandKind::Swapdb => "SWAPDB", - RedisCommandKind::Sync => "SYNC", - RedisCommandKind::Time => "TIME", - RedisCommandKind::Touch => "TOUCH", - RedisCommandKind::Ttl => "TTL", - RedisCommandKind::Type => "TYPE", - RedisCommandKind::Unsubscribe => "UNSUBSCRIBE", - RedisCommandKind::Unlink => "UNLINK", - RedisCommandKind::Unwatch => "UNWATCH", - RedisCommandKind::Wait => "WAIT", - RedisCommandKind::Watch 
=> "WATCH", - RedisCommandKind::XinfoConsumers => "XINFO CONSUMERS", - RedisCommandKind::XinfoGroups => "XINFO GROUPS", - RedisCommandKind::XinfoStream => "XINFO STREAM", - RedisCommandKind::Xadd => "XADD", - RedisCommandKind::Xtrim => "XTRIM", - RedisCommandKind::Xdel => "XDEL", - RedisCommandKind::Xrange => "XRANGE", - RedisCommandKind::Xrevrange => "XREVRANGE", - RedisCommandKind::Xlen => "XLEN", - RedisCommandKind::Xread => "XREAD", - RedisCommandKind::Xgroupcreate => "XGROUP CREATE", - RedisCommandKind::XgroupCreateConsumer => "XGROUP CREATECONSUMER", - RedisCommandKind::XgroupDelConsumer => "XGROUP DELCONSUMER", - RedisCommandKind::XgroupDestroy => "XGROUP DESTROY", - RedisCommandKind::XgroupSetId => "XGROUP SETID", - RedisCommandKind::Xreadgroup => "XREADGROUP", - RedisCommandKind::Xack => "XACK", - RedisCommandKind::Xclaim => "XCLAIM", - RedisCommandKind::Xautoclaim => "XAUTOCLAIM", - RedisCommandKind::Xpending => "XPENDING", - RedisCommandKind::Zadd => "ZADD", - RedisCommandKind::Zcard => "ZCARD", - RedisCommandKind::Zcount => "ZCOUNT", - RedisCommandKind::Zdiff => "ZDIFF", - RedisCommandKind::Zdiffstore => "ZDIFFSTORE", - RedisCommandKind::Zincrby => "ZINCRBY", - RedisCommandKind::Zinter => "ZINTER", - RedisCommandKind::Zinterstore => "ZINTERSTORE", - RedisCommandKind::Zlexcount => "ZLEXCOUNT", - RedisCommandKind::Zrandmember => "ZRANDMEMBER", - RedisCommandKind::Zrange => "ZRANGE", - RedisCommandKind::Zrangestore => "ZRANGESTORE", - RedisCommandKind::Zrangebylex => "ZRANGEBYLEX", - RedisCommandKind::Zrangebyscore => "ZRANGEBYSCORE", - RedisCommandKind::Zrank => "ZRANK", - RedisCommandKind::Zrem => "ZREM", - RedisCommandKind::Zremrangebylex => "ZREMRANGEBYLEX", - RedisCommandKind::Zremrangebyrank => "ZREMRANGEBYRANK", - RedisCommandKind::Zremrangebyscore => "ZREMRANGEBYSCORE", - RedisCommandKind::Zrevrange => "ZREVRANGE", - RedisCommandKind::Zrevrangebylex => "ZREVRANGEBYLEX", - RedisCommandKind::Zrevrangebyscore => "ZREVRANGEBYSCORE", - 
RedisCommandKind::Zrevrank => "ZREVRANK", - RedisCommandKind::Zscore => "ZSCORE", - RedisCommandKind::Zmscore => "ZMSCORE", - RedisCommandKind::Zunion => "ZUNION", - RedisCommandKind::Zunionstore => "ZUNIONSTORE", - RedisCommandKind::Zpopmax => "ZPOPMAX", - RedisCommandKind::Zpopmin => "ZPOPMIN", - RedisCommandKind::Zmpop => "ZMPOP", - RedisCommandKind::Scan => "SCAN", - RedisCommandKind::Sscan => "SSCAN", - RedisCommandKind::Hscan => "HSCAN", - RedisCommandKind::Zscan => "ZSCAN", - RedisCommandKind::ScriptDebug => "SCRIPT DEBUG", - RedisCommandKind::ScriptExists => "SCRIPT EXISTS", - RedisCommandKind::ScriptFlush => "SCRIPT FLUSH", - RedisCommandKind::ScriptKill => "SCRIPT KILL", - RedisCommandKind::ScriptLoad => "SCRIPT LOAD", - RedisCommandKind::Spublish => "SPUBLISH", - RedisCommandKind::Ssubscribe => "SSUBSCRIBE", - RedisCommandKind::Sunsubscribe => "SUNSUBSCRIBE", - RedisCommandKind::_AuthAllCluster => "AUTH ALL CLUSTER", - RedisCommandKind::_HelloAllCluster(_) => "HELLO ALL CLUSTER", - RedisCommandKind::_FlushAllCluster => "FLUSHALL CLUSTER", - RedisCommandKind::_ScriptFlushCluster => "SCRIPT FLUSH CLUSTER", - RedisCommandKind::_ScriptLoadCluster => "SCRIPT LOAD CLUSTER", - RedisCommandKind::_ScriptKillCluster => "SCRIPT Kill CLUSTER", - RedisCommandKind::_FunctionLoadCluster => "FUNCTION LOAD CLUSTER", - RedisCommandKind::_FunctionFlushCluster => "FUNCTION FLUSH CLUSTER", - RedisCommandKind::_FunctionDeleteCluster => "FUNCTION DELETE CLUSTER", - RedisCommandKind::_FunctionRestoreCluster => "FUNCTION RESTORE CLUSTER", - RedisCommandKind::_ClientTrackingCluster => "CLIENT TRACKING CLUSTER", - RedisCommandKind::Fcall => "FCALL", - RedisCommandKind::FcallRO => "FCALL_RO", - RedisCommandKind::FunctionDelete => "FUNCTION DELETE", - RedisCommandKind::FunctionDump => "FUNCTION DUMP", - RedisCommandKind::FunctionFlush => "FUNCTION FLUSH", - RedisCommandKind::FunctionKill => "FUNCTION KILL", - RedisCommandKind::FunctionList => "FUNCTION LIST", - 
RedisCommandKind::FunctionLoad => "FUNCTION LOAD", - RedisCommandKind::FunctionRestore => "FUNCTION RESTORE", - RedisCommandKind::FunctionStats => "FUNCTION STATS", - RedisCommandKind::PubsubChannels => "PUBSUB CHANNELS", - RedisCommandKind::PubsubNumpat => "PUBSUB NUMPAT", - RedisCommandKind::PubsubNumsub => "PUBSUB NUMSUB", - RedisCommandKind::PubsubShardchannels => "PUBSUB SHARDCHANNELS", - RedisCommandKind::PubsubShardnumsub => "PUBSUB SHARDNUMSUB", - RedisCommandKind::JsonArrAppend => "JSON.ARRAPPEND", - RedisCommandKind::JsonArrIndex => "JSON.ARRINDEX", - RedisCommandKind::JsonArrInsert => "JSON.ARRINSERT", - RedisCommandKind::JsonArrLen => "JSON.ARRLEN", - RedisCommandKind::JsonArrPop => "JSON.ARRPOP", - RedisCommandKind::JsonArrTrim => "JSON.ARRTRIM", - RedisCommandKind::JsonClear => "JSON.CLEAR", - RedisCommandKind::JsonDebugMemory => "JSON.DEBUG MEMORY", - RedisCommandKind::JsonDel => "JSON.DEL", - RedisCommandKind::JsonGet => "JSON.GET", - RedisCommandKind::JsonMerge => "JSON.MERGE", - RedisCommandKind::JsonMGet => "JSON.MGET", - RedisCommandKind::JsonMSet => "JSON.MSET", - RedisCommandKind::JsonNumIncrBy => "JSON.NUMINCRBY", - RedisCommandKind::JsonObjKeys => "JSON.OBJKEYS", - RedisCommandKind::JsonObjLen => "JSON.OBJLEN", - RedisCommandKind::JsonResp => "JSON.RESP", - RedisCommandKind::JsonSet => "JSON.SET", - RedisCommandKind::JsonStrAppend => "JSON.STRAPPEND", - RedisCommandKind::JsonStrLen => "JSON.STRLEN", - RedisCommandKind::JsonToggle => "JSON.TOGGLE", - RedisCommandKind::JsonType => "JSON.TYPE", - RedisCommandKind::TsAdd => "TS.ADD", - RedisCommandKind::TsAlter => "TS.ALTER", - RedisCommandKind::TsCreate => "TS.CREATE", - RedisCommandKind::TsCreateRule => "TS.CREATERULE", - RedisCommandKind::TsDecrBy => "TS.DECRBY", - RedisCommandKind::TsDel => "TS.DEL", - RedisCommandKind::TsDeleteRule => "TS.DELETERULE", - RedisCommandKind::TsGet => "TS.GET", - RedisCommandKind::TsIncrBy => "TS.INCRBY", - RedisCommandKind::TsInfo => "TS.INFO", - 
RedisCommandKind::TsMAdd => "TS.MADD", - RedisCommandKind::TsMGet => "TS.MGET", - RedisCommandKind::TsMRange => "TS.MRANGE", - RedisCommandKind::TsMRevRange => "TS.MREVRANGE", - RedisCommandKind::TsQueryIndex => "TS.QUERYINDEX", - RedisCommandKind::TsRange => "TS.RANGE", - RedisCommandKind::TsRevRange => "TS.REVRANGE", - RedisCommandKind::FtList => "FT._LIST", - RedisCommandKind::FtAggregate => "FT.AGGREGATE", - RedisCommandKind::FtSearch => "FT.SEARCH", - RedisCommandKind::FtCreate => "FT.CREATE", - RedisCommandKind::FtAlter => "FT.ALTER", - RedisCommandKind::FtAliasAdd => "FT.ALIASADD", - RedisCommandKind::FtAliasDel => "FT.ALIASDEL", - RedisCommandKind::FtAliasUpdate => "FT.ALIASUPDATE", - RedisCommandKind::FtConfigGet => "FT.CONFIG GET", - RedisCommandKind::FtConfigSet => "FT.CONFIG SET", - RedisCommandKind::FtCursorDel => "FT.CURSOR DEL", - RedisCommandKind::FtCursorRead => "FT.CURSOR READ", - RedisCommandKind::FtDictAdd => "FT.DICTADD", - RedisCommandKind::FtDictDel => "FT.DICTDEL", - RedisCommandKind::FtDictDump => "FT.DICTDUMP", - RedisCommandKind::FtDropIndex => "FT.DROPINDEX", - RedisCommandKind::FtExplain => "FT.EXPLAIN", - RedisCommandKind::FtInfo => "FT.INFO", - RedisCommandKind::FtSpellCheck => "FT.SPELLCHECK", - RedisCommandKind::FtSugAdd => "FT.SUGADD", - RedisCommandKind::FtSugDel => "FT.SUGDEL", - RedisCommandKind::FtSugGet => "FT.SUGGET", - RedisCommandKind::FtSugLen => "FT.SUGLEN", - RedisCommandKind::FtSynDump => "FT.SYNDUMP", - RedisCommandKind::FtSynUpdate => "FT.SYNUPDATE", - RedisCommandKind::FtTagVals => "FT.TAGVALS", - RedisCommandKind::_Custom(ref kind) => &kind.cmd, + CommandKind::AclLoad => "ACL LOAD", + CommandKind::AclSave => "ACL SAVE", + CommandKind::AclList => "ACL LIST", + CommandKind::AclUsers => "ACL USERS", + CommandKind::AclGetUser => "ACL GETUSER", + CommandKind::AclSetUser => "ACL SETUSER", + CommandKind::AclDelUser => "ACL DELUSER", + CommandKind::AclCat => "ACL CAT", + CommandKind::AclGenPass => "ACL GENPASS", + 
CommandKind::AclWhoAmI => "ACL WHOAMI", + CommandKind::AclLog => "ACL LOG", + CommandKind::AclHelp => "ACL HELP", + CommandKind::Append => "APPEND", + CommandKind::Auth => "AUTH", + CommandKind::Asking => "ASKING", + CommandKind::BgreWriteAof => "BGREWRITEAOF", + CommandKind::BgSave => "BGSAVE", + CommandKind::BitCount => "BITCOUNT", + CommandKind::BitField => "BITFIELD", + CommandKind::BitOp => "BITOP", + CommandKind::BitPos => "BITPOS", + CommandKind::BlPop => "BLPOP", + CommandKind::BlMove => "BLMOVE", + CommandKind::BrPop => "BRPOP", + CommandKind::BzmPop => "BZMPOP", + CommandKind::BlmPop => "BLMPOP", + CommandKind::BrPopLPush => "BRPOPLPUSH", + CommandKind::BzPopMin => "BZPOPMIN", + CommandKind::BzPopMax => "BZPOPMAX", + CommandKind::ClientID => "CLIENT ID", + CommandKind::ClientInfo => "CLIENT INFO", + CommandKind::ClientKill => "CLIENT KILL", + CommandKind::ClientList => "CLIENT LIST", + CommandKind::ClientGetName => "CLIENT GETNAME", + CommandKind::ClientPause => "CLIENT PAUSE", + CommandKind::ClientUnpause => "CLIENT UNPAUSE", + CommandKind::ClientUnblock => "CLIENT UNBLOCK", + CommandKind::ClientReply => "CLIENT REPLY", + CommandKind::ClientSetname => "CLIENT SETNAME", + CommandKind::ClientGetRedir => "CLIENT GETREDIR", + CommandKind::ClientTracking => "CLIENT TRACKING", + CommandKind::ClientTrackingInfo => "CLIENT TRACKINGINFO", + CommandKind::ClientCaching => "CLIENT CACHING", + CommandKind::ClusterAddSlots => "CLUSTER ADDSLOTS", + CommandKind::ClusterCountFailureReports => "CLUSTER COUNT-FAILURE-REPORTS", + CommandKind::ClusterCountKeysInSlot => "CLUSTER COUNTKEYSINSLOT", + CommandKind::ClusterDelSlots => "CLUSTER DELSLOTS", + CommandKind::ClusterFailOver => "CLUSTER FAILOVER", + CommandKind::ClusterForget => "CLUSTER FORGET", + CommandKind::ClusterGetKeysInSlot => "CLUSTER GETKEYSINSLOT", + CommandKind::ClusterInfo => "CLUSTER INFO", + CommandKind::ClusterKeySlot => "CLUSTER KEYSLOT", + CommandKind::ClusterMeet => "CLUSTER MEET", + 
CommandKind::ClusterNodes => "CLUSTER NODES", + CommandKind::ClusterReplicate => "CLUSTER REPLICATE", + CommandKind::ClusterReset => "CLUSTER RESET", + CommandKind::ClusterSaveConfig => "CLUSTER SAVECONFIG", + CommandKind::ClusterSetConfigEpoch => "CLUSTER SET-CONFIG-EPOCH", + CommandKind::ClusterSetSlot => "CLUSTER SETSLOT", + CommandKind::ClusterReplicas => "CLUSTER REPLICAS", + CommandKind::ClusterSlots => "CLUSTER SLOTS", + CommandKind::ClusterBumpEpoch => "CLUSTER BUMPEPOCH", + CommandKind::ClusterFlushSlots => "CLUSTER FLUSHSLOTS", + CommandKind::ClusterMyID => "CLUSTER MYID", + CommandKind::ConfigGet => "CONFIG GET", + CommandKind::ConfigRewrite => "CONFIG REWRITE", + CommandKind::ConfigSet => "CONFIG SET", + CommandKind::ConfigResetStat => "CONFIG RESETSTAT", + CommandKind::Copy => "COPY", + CommandKind::DBSize => "DBSIZE", + CommandKind::Decr => "DECR", + CommandKind::DecrBy => "DECRBY", + CommandKind::Del => "DEL", + CommandKind::Discard => "DISCARD", + CommandKind::Dump => "DUMP", + CommandKind::Echo => "ECHO", + CommandKind::Eval => "EVAL", + CommandKind::EvalSha => "EVALSHA", + CommandKind::Exec => "EXEC", + CommandKind::Exists => "EXISTS", + CommandKind::Expire => "EXPIRE", + CommandKind::ExpireAt => "EXPIREAT", + CommandKind::ExpireTime => "EXPIRETIME", + CommandKind::Failover => "FAILOVER", + CommandKind::FlushAll => "FLUSHALL", + CommandKind::FlushDB => "FLUSHDB", + CommandKind::GeoAdd => "GEOADD", + CommandKind::GeoHash => "GEOHASH", + CommandKind::GeoPos => "GEOPOS", + CommandKind::GeoDist => "GEODIST", + CommandKind::GeoRadius => "GEORADIUS", + CommandKind::GeoRadiusByMember => "GEORADIUSBYMEMBER", + CommandKind::GeoSearch => "GEOSEARCH", + CommandKind::GeoSearchStore => "GEOSEARCHSTORE", + CommandKind::Get => "GET", + CommandKind::GetDel => "GETDEL", + CommandKind::GetBit => "GETBIT", + CommandKind::GetRange => "GETRANGE", + CommandKind::GetSet => "GETSET", + CommandKind::HDel => "HDEL", + CommandKind::_Hello(_) => "HELLO", + 
CommandKind::HExists => "HEXISTS", + CommandKind::HGet => "HGET", + CommandKind::HGetAll => "HGETALL", + CommandKind::HIncrBy => "HINCRBY", + CommandKind::HIncrByFloat => "HINCRBYFLOAT", + CommandKind::HKeys => "HKEYS", + CommandKind::HLen => "HLEN", + CommandKind::HMGet => "HMGET", + CommandKind::HMSet => "HMSET", + CommandKind::HSet => "HSET", + CommandKind::HSetNx => "HSETNX", + CommandKind::HStrLen => "HSTRLEN", + CommandKind::HRandField => "HRANDFIELD", + CommandKind::HTtl => "HTTL", + CommandKind::HExpire => "HEXPIRE", + CommandKind::HExpireAt => "HEXPIREAT", + CommandKind::HExpireTime => "HEXPIRETIME", + CommandKind::HPersist => "HPERSIST", + CommandKind::HPTtl => "HPTTL", + CommandKind::HPExpire => "HPEXPIRE", + CommandKind::HPExpireAt => "HPEXPIREAT", + CommandKind::HPExpireTime => "HPEXPIRETIME", + CommandKind::HVals => "HVALS", + CommandKind::Incr => "INCR", + CommandKind::IncrBy => "INCRBY", + CommandKind::IncrByFloat => "INCRBYFLOAT", + CommandKind::Info => "INFO", + CommandKind::Keys => "KEYS", + CommandKind::LastSave => "LASTSAVE", + CommandKind::LIndex => "LINDEX", + CommandKind::LInsert => "LINSERT", + CommandKind::LLen => "LLEN", + CommandKind::LMove => "LMOVE", + CommandKind::LPop => "LPOP", + CommandKind::LPos => "LPOS", + CommandKind::LPush => "LPUSH", + CommandKind::LPushX => "LPUSHX", + CommandKind::LRange => "LRANGE", + CommandKind::LMPop => "LMPOP", + CommandKind::LRem => "LREM", + CommandKind::LSet => "LSET", + CommandKind::LTrim => "LTRIM", + CommandKind::Lcs => "LCS", + CommandKind::MemoryDoctor => "MEMORY DOCTOR", + CommandKind::MemoryHelp => "MEMORY HELP", + CommandKind::MemoryMallocStats => "MEMORY MALLOC-STATS", + CommandKind::MemoryPurge => "MEMORY PURGE", + CommandKind::MemoryStats => "MEMORY STATS", + CommandKind::MemoryUsage => "MEMORY USAGE", + CommandKind::Mget => "MGET", + CommandKind::Migrate => "MIGRATE", + CommandKind::Monitor => "MONITOR", + CommandKind::Move => "MOVE", + CommandKind::Mset => "MSET", + CommandKind::Msetnx 
=> "MSETNX", + CommandKind::Multi => "MULTI", + CommandKind::Object => "OBJECT", + CommandKind::Persist => "PERSIST", + CommandKind::Pexpire => "PEXPIRE", + CommandKind::Pexpireat => "PEXPIREAT", + CommandKind::PexpireTime => "PEXPIRETIME", + CommandKind::Pfadd => "PFADD", + CommandKind::Pfcount => "PFCOUNT", + CommandKind::Pfmerge => "PFMERGE", + CommandKind::Ping => "PING", + CommandKind::Psetex => "PSETEX", + CommandKind::Psubscribe => "PSUBSCRIBE", + CommandKind::Pttl => "PTTL", + CommandKind::Publish => "PUBLISH", + CommandKind::Punsubscribe => "PUNSUBSCRIBE", + CommandKind::Quit => "QUIT", + CommandKind::Randomkey => "RANDOMKEY", + CommandKind::Readonly => "READONLY", + CommandKind::Readwrite => "READWRITE", + CommandKind::Rename => "RENAME", + CommandKind::Renamenx => "RENAMENX", + CommandKind::Restore => "RESTORE", + CommandKind::Role => "ROLE", + CommandKind::Rpop => "RPOP", + CommandKind::Rpoplpush => "RPOPLPUSH", + CommandKind::Rpush => "RPUSH", + CommandKind::Rpushx => "RPUSHX", + CommandKind::Sadd => "SADD", + CommandKind::Save => "SAVE", + CommandKind::Scard => "SCARD", + CommandKind::Sdiff => "SDIFF", + CommandKind::Sdiffstore => "SDIFFSTORE", + CommandKind::Select => "SELECT", + CommandKind::Sentinel => "SENTINEL", + CommandKind::Set => "SET", + CommandKind::Setbit => "SETBIT", + CommandKind::Setex => "SETEX", + CommandKind::Setnx => "SETNX", + CommandKind::Setrange => "SETRANGE", + CommandKind::Shutdown => "SHUTDOWN", + CommandKind::Sinter => "SINTER", + CommandKind::Sinterstore => "SINTERSTORE", + CommandKind::Sismember => "SISMEMBER", + CommandKind::Replicaof => "REPLICAOF", + CommandKind::Slowlog => "SLOWLOG", + CommandKind::Smembers => "SMEMBERS", + CommandKind::Smismember => "SMISMEMBER", + CommandKind::Smove => "SMOVE", + CommandKind::Sort => "SORT", + CommandKind::SortRo => "SORT_RO", + CommandKind::Spop => "SPOP", + CommandKind::Srandmember => "SRANDMEMBER", + CommandKind::Srem => "SREM", + CommandKind::Strlen => "STRLEN", + 
CommandKind::Subscribe => "SUBSCRIBE", + CommandKind::Sunion => "SUNION", + CommandKind::Sunionstore => "SUNIONSTORE", + CommandKind::Swapdb => "SWAPDB", + CommandKind::Sync => "SYNC", + CommandKind::Time => "TIME", + CommandKind::Touch => "TOUCH", + CommandKind::Ttl => "TTL", + CommandKind::Type => "TYPE", + CommandKind::Unsubscribe => "UNSUBSCRIBE", + CommandKind::Unlink => "UNLINK", + CommandKind::Unwatch => "UNWATCH", + CommandKind::Wait => "WAIT", + CommandKind::Watch => "WATCH", + CommandKind::XinfoConsumers => "XINFO CONSUMERS", + CommandKind::XinfoGroups => "XINFO GROUPS", + CommandKind::XinfoStream => "XINFO STREAM", + CommandKind::Xadd => "XADD", + CommandKind::Xtrim => "XTRIM", + CommandKind::Xdel => "XDEL", + CommandKind::Xrange => "XRANGE", + CommandKind::Xrevrange => "XREVRANGE", + CommandKind::Xlen => "XLEN", + CommandKind::Xread => "XREAD", + CommandKind::Xgroupcreate => "XGROUP CREATE", + CommandKind::XgroupCreateConsumer => "XGROUP CREATECONSUMER", + CommandKind::XgroupDelConsumer => "XGROUP DELCONSUMER", + CommandKind::XgroupDestroy => "XGROUP DESTROY", + CommandKind::XgroupSetId => "XGROUP SETID", + CommandKind::Xreadgroup => "XREADGROUP", + CommandKind::Xack => "XACK", + CommandKind::Xclaim => "XCLAIM", + CommandKind::Xautoclaim => "XAUTOCLAIM", + CommandKind::Xpending => "XPENDING", + CommandKind::Zadd => "ZADD", + CommandKind::Zcard => "ZCARD", + CommandKind::Zcount => "ZCOUNT", + CommandKind::Zdiff => "ZDIFF", + CommandKind::Zdiffstore => "ZDIFFSTORE", + CommandKind::Zincrby => "ZINCRBY", + CommandKind::Zinter => "ZINTER", + CommandKind::Zinterstore => "ZINTERSTORE", + CommandKind::Zlexcount => "ZLEXCOUNT", + CommandKind::Zrandmember => "ZRANDMEMBER", + CommandKind::Zrange => "ZRANGE", + CommandKind::Zrangestore => "ZRANGESTORE", + CommandKind::Zrangebylex => "ZRANGEBYLEX", + CommandKind::Zrangebyscore => "ZRANGEBYSCORE", + CommandKind::Zrank => "ZRANK", + CommandKind::Zrem => "ZREM", + CommandKind::Zremrangebylex => "ZREMRANGEBYLEX", + 
CommandKind::Zremrangebyrank => "ZREMRANGEBYRANK", + CommandKind::Zremrangebyscore => "ZREMRANGEBYSCORE", + CommandKind::Zrevrange => "ZREVRANGE", + CommandKind::Zrevrangebylex => "ZREVRANGEBYLEX", + CommandKind::Zrevrangebyscore => "ZREVRANGEBYSCORE", + CommandKind::Zrevrank => "ZREVRANK", + CommandKind::Zscore => "ZSCORE", + CommandKind::Zmscore => "ZMSCORE", + CommandKind::Zunion => "ZUNION", + CommandKind::Zunionstore => "ZUNIONSTORE", + CommandKind::Zpopmax => "ZPOPMAX", + CommandKind::Zpopmin => "ZPOPMIN", + CommandKind::Zmpop => "ZMPOP", + CommandKind::Scan => "SCAN", + CommandKind::Sscan => "SSCAN", + CommandKind::Hscan => "HSCAN", + CommandKind::Zscan => "ZSCAN", + CommandKind::ScriptDebug => "SCRIPT DEBUG", + CommandKind::ScriptExists => "SCRIPT EXISTS", + CommandKind::ScriptFlush => "SCRIPT FLUSH", + CommandKind::ScriptKill => "SCRIPT KILL", + CommandKind::ScriptLoad => "SCRIPT LOAD", + CommandKind::Spublish => "SPUBLISH", + CommandKind::Ssubscribe => "SSUBSCRIBE", + CommandKind::Sunsubscribe => "SUNSUBSCRIBE", + CommandKind::_AuthAllCluster => "AUTH ALL CLUSTER", + CommandKind::_HelloAllCluster(_) => "HELLO ALL CLUSTER", + CommandKind::_FlushAllCluster => "FLUSHALL CLUSTER", + CommandKind::_ScriptFlushCluster => "SCRIPT FLUSH CLUSTER", + CommandKind::_ScriptLoadCluster => "SCRIPT LOAD CLUSTER", + CommandKind::_ScriptKillCluster => "SCRIPT KILL CLUSTER", + CommandKind::_FunctionLoadCluster => "FUNCTION LOAD CLUSTER", + CommandKind::_FunctionFlushCluster => "FUNCTION FLUSH CLUSTER", + CommandKind::_FunctionDeleteCluster => "FUNCTION DELETE CLUSTER", + CommandKind::_FunctionRestoreCluster => "FUNCTION RESTORE CLUSTER", + CommandKind::_ClientTrackingCluster => "CLIENT TRACKING CLUSTER", + CommandKind::Fcall => "FCALL", + CommandKind::FcallRO => "FCALL_RO", + CommandKind::FunctionDelete => "FUNCTION DELETE", + CommandKind::FunctionDump => "FUNCTION DUMP", + CommandKind::FunctionFlush => "FUNCTION FLUSH", + CommandKind::FunctionKill => "FUNCTION KILL", + 
CommandKind::FunctionList => "FUNCTION LIST", + CommandKind::FunctionLoad => "FUNCTION LOAD", + CommandKind::FunctionRestore => "FUNCTION RESTORE", + CommandKind::FunctionStats => "FUNCTION STATS", + CommandKind::PubsubChannels => "PUBSUB CHANNELS", + CommandKind::PubsubNumpat => "PUBSUB NUMPAT", + CommandKind::PubsubNumsub => "PUBSUB NUMSUB", + CommandKind::PubsubShardchannels => "PUBSUB SHARDCHANNELS", + CommandKind::PubsubShardnumsub => "PUBSUB SHARDNUMSUB", + CommandKind::JsonArrAppend => "JSON.ARRAPPEND", + CommandKind::JsonArrIndex => "JSON.ARRINDEX", + CommandKind::JsonArrInsert => "JSON.ARRINSERT", + CommandKind::JsonArrLen => "JSON.ARRLEN", + CommandKind::JsonArrPop => "JSON.ARRPOP", + CommandKind::JsonArrTrim => "JSON.ARRTRIM", + CommandKind::JsonClear => "JSON.CLEAR", + CommandKind::JsonDebugMemory => "JSON.DEBUG MEMORY", + CommandKind::JsonDel => "JSON.DEL", + CommandKind::JsonGet => "JSON.GET", + CommandKind::JsonMerge => "JSON.MERGE", + CommandKind::JsonMGet => "JSON.MGET", + CommandKind::JsonMSet => "JSON.MSET", + CommandKind::JsonNumIncrBy => "JSON.NUMINCRBY", + CommandKind::JsonObjKeys => "JSON.OBJKEYS", + CommandKind::JsonObjLen => "JSON.OBJLEN", + CommandKind::JsonResp => "JSON.RESP", + CommandKind::JsonSet => "JSON.SET", + CommandKind::JsonStrAppend => "JSON.STRAPPEND", + CommandKind::JsonStrLen => "JSON.STRLEN", + CommandKind::JsonToggle => "JSON.TOGGLE", + CommandKind::JsonType => "JSON.TYPE", + CommandKind::TsAdd => "TS.ADD", + CommandKind::TsAlter => "TS.ALTER", + CommandKind::TsCreate => "TS.CREATE", + CommandKind::TsCreateRule => "TS.CREATERULE", + CommandKind::TsDecrBy => "TS.DECRBY", + CommandKind::TsDel => "TS.DEL", + CommandKind::TsDeleteRule => "TS.DELETERULE", + CommandKind::TsGet => "TS.GET", + CommandKind::TsIncrBy => "TS.INCRBY", + CommandKind::TsInfo => "TS.INFO", + CommandKind::TsMAdd => "TS.MADD", + CommandKind::TsMGet => "TS.MGET", + CommandKind::TsMRange => "TS.MRANGE", + CommandKind::TsMRevRange => "TS.MREVRANGE", + 
CommandKind::TsQueryIndex => "TS.QUERYINDEX", + CommandKind::TsRange => "TS.RANGE", + CommandKind::TsRevRange => "TS.REVRANGE", + CommandKind::FtList => "FT._LIST", + CommandKind::FtAggregate => "FT.AGGREGATE", + CommandKind::FtSearch => "FT.SEARCH", + CommandKind::FtCreate => "FT.CREATE", + CommandKind::FtAlter => "FT.ALTER", + CommandKind::FtAliasAdd => "FT.ALIASADD", + CommandKind::FtAliasDel => "FT.ALIASDEL", + CommandKind::FtAliasUpdate => "FT.ALIASUPDATE", + CommandKind::FtConfigGet => "FT.CONFIG GET", + CommandKind::FtConfigSet => "FT.CONFIG SET", + CommandKind::FtCursorDel => "FT.CURSOR DEL", + CommandKind::FtCursorRead => "FT.CURSOR READ", + CommandKind::FtDictAdd => "FT.DICTADD", + CommandKind::FtDictDel => "FT.DICTDEL", + CommandKind::FtDictDump => "FT.DICTDUMP", + CommandKind::FtDropIndex => "FT.DROPINDEX", + CommandKind::FtExplain => "FT.EXPLAIN", + CommandKind::FtInfo => "FT.INFO", + CommandKind::FtSpellCheck => "FT.SPELLCHECK", + CommandKind::FtSugAdd => "FT.SUGADD", + CommandKind::FtSugDel => "FT.SUGDEL", + CommandKind::FtSugGet => "FT.SUGGET", + CommandKind::FtSugLen => "FT.SUGLEN", + CommandKind::FtSynDump => "FT.SYNDUMP", + CommandKind::FtSynUpdate => "FT.SYNUPDATE", + CommandKind::FtTagVals => "FT.TAGVALS", + CommandKind::_Custom(ref kind) => &kind.cmd, } } @@ -974,382 +926,381 @@ impl RedisCommandKind { /// command. 
pub(crate) fn cmd_str(&self) -> Str { let s = match *self { - RedisCommandKind::AclLoad - | RedisCommandKind::AclSave - | RedisCommandKind::AclList - | RedisCommandKind::AclUsers - | RedisCommandKind::AclGetUser - | RedisCommandKind::AclSetUser - | RedisCommandKind::AclDelUser - | RedisCommandKind::AclCat - | RedisCommandKind::AclGenPass - | RedisCommandKind::AclWhoAmI - | RedisCommandKind::AclLog - | RedisCommandKind::AclHelp => "ACL", - RedisCommandKind::Append => "APPEND", - RedisCommandKind::Auth => "AUTH", - RedisCommandKind::Asking => "ASKING", - RedisCommandKind::BgreWriteAof => "BGREWRITEAOF", - RedisCommandKind::BgSave => "BGSAVE", - RedisCommandKind::BitCount => "BITCOUNT", - RedisCommandKind::BitField => "BITFIELD", - RedisCommandKind::BitOp => "BITOP", - RedisCommandKind::BitPos => "BITPOS", - RedisCommandKind::BlPop => "BLPOP", - RedisCommandKind::BlMove => "BLMOVE", - RedisCommandKind::BrPop => "BRPOP", - RedisCommandKind::BrPopLPush => "BRPOPLPUSH", - RedisCommandKind::BzPopMin => "BZPOPMIN", - RedisCommandKind::BzPopMax => "BZPOPMAX", - RedisCommandKind::BzmPop => "BZMPOP", - RedisCommandKind::BlmPop => "BLMPOP", - RedisCommandKind::ClientID - | RedisCommandKind::ClientInfo - | RedisCommandKind::ClientKill - | RedisCommandKind::ClientList - | RedisCommandKind::ClientGetName - | RedisCommandKind::ClientPause - | RedisCommandKind::ClientUnpause - | RedisCommandKind::ClientUnblock - | RedisCommandKind::ClientReply - | RedisCommandKind::ClientSetname - | RedisCommandKind::ClientCaching - | RedisCommandKind::ClientTrackingInfo - | RedisCommandKind::ClientTracking - | RedisCommandKind::ClientGetRedir => "CLIENT", - RedisCommandKind::ClusterAddSlots - | RedisCommandKind::ClusterCountFailureReports - | RedisCommandKind::ClusterCountKeysInSlot - | RedisCommandKind::ClusterDelSlots - | RedisCommandKind::ClusterFailOver - | RedisCommandKind::ClusterForget - | RedisCommandKind::ClusterGetKeysInSlot - | RedisCommandKind::ClusterInfo - | 
RedisCommandKind::ClusterKeySlot - | RedisCommandKind::ClusterMeet - | RedisCommandKind::ClusterNodes - | RedisCommandKind::ClusterReplicate - | RedisCommandKind::ClusterReset - | RedisCommandKind::ClusterSaveConfig - | RedisCommandKind::ClusterSetConfigEpoch - | RedisCommandKind::ClusterSetSlot - | RedisCommandKind::ClusterReplicas - | RedisCommandKind::ClusterSlots - | RedisCommandKind::ClusterBumpEpoch - | RedisCommandKind::ClusterFlushSlots - | RedisCommandKind::ClusterMyID => "CLUSTER", - RedisCommandKind::ConfigGet - | RedisCommandKind::ConfigRewrite - | RedisCommandKind::ConfigSet - | RedisCommandKind::ConfigResetStat => "CONFIG", - RedisCommandKind::Copy => "COPY", - RedisCommandKind::DBSize => "DBSIZE", - RedisCommandKind::Decr => "DECR", - RedisCommandKind::DecrBy => "DECRBY", - RedisCommandKind::Del => "DEL", - RedisCommandKind::Discard => "DISCARD", - RedisCommandKind::Dump => "DUMP", - RedisCommandKind::Echo => "ECHO", - RedisCommandKind::Eval => "EVAL", - RedisCommandKind::EvalSha => "EVALSHA", - RedisCommandKind::Exec => "EXEC", - RedisCommandKind::Exists => "EXISTS", - RedisCommandKind::Expire => "EXPIRE", - RedisCommandKind::ExpireAt => "EXPIREAT", - RedisCommandKind::ExpireTime => "EXPIRETIME", - RedisCommandKind::Failover => "FAILOVER", - RedisCommandKind::FlushAll => "FLUSHALL", - RedisCommandKind::_FlushAllCluster => "FLUSHALL", - RedisCommandKind::FlushDB => "FLUSHDB", - RedisCommandKind::GeoAdd => "GEOADD", - RedisCommandKind::GeoHash => "GEOHASH", - RedisCommandKind::GeoPos => "GEOPOS", - RedisCommandKind::GeoDist => "GEODIST", - RedisCommandKind::GeoRadius => "GEORADIUS", - RedisCommandKind::GeoRadiusByMember => "GEORADIUSBYMEMBER", - RedisCommandKind::GeoSearch => "GEOSEARCH", - RedisCommandKind::GeoSearchStore => "GEOSEARCHSTORE", - RedisCommandKind::Get => "GET", - RedisCommandKind::GetDel => "GETDEL", - RedisCommandKind::GetBit => "GETBIT", - RedisCommandKind::GetRange => "GETRANGE", - RedisCommandKind::GetSet => "GETSET", - 
RedisCommandKind::HDel => "HDEL", - RedisCommandKind::_Hello(_) => "HELLO", - RedisCommandKind::HExists => "HEXISTS", - RedisCommandKind::HGet => "HGET", - RedisCommandKind::HGetAll => "HGETALL", - RedisCommandKind::HIncrBy => "HINCRBY", - RedisCommandKind::HIncrByFloat => "HINCRBYFLOAT", - RedisCommandKind::HKeys => "HKEYS", - RedisCommandKind::HLen => "HLEN", - RedisCommandKind::HMGet => "HMGET", - RedisCommandKind::HMSet => "HMSET", - RedisCommandKind::HSet => "HSET", - RedisCommandKind::HSetNx => "HSETNX", - RedisCommandKind::HStrLen => "HSTRLEN", - RedisCommandKind::HRandField => "HRANDFIELD", - RedisCommandKind::HTtl => "HTTL", - RedisCommandKind::HExpire => "HEXPIRE", - RedisCommandKind::HExpireAt => "HEXPIREAT", - RedisCommandKind::HExpireTime => "HEXPIRETIME", - RedisCommandKind::HPersist => "HPERSIST", - RedisCommandKind::HPTtl => "HPTTL", - RedisCommandKind::HPExpire => "HPEXPIRE", - RedisCommandKind::HPExpireAt => "HPEXPIREAT", - RedisCommandKind::HPExpireTime => "HPEXPIRETIME", - RedisCommandKind::HVals => "HVALS", - RedisCommandKind::Incr => "INCR", - RedisCommandKind::IncrBy => "INCRBY", - RedisCommandKind::IncrByFloat => "INCRBYFLOAT", - RedisCommandKind::Info => "INFO", - RedisCommandKind::Keys => "KEYS", - RedisCommandKind::LastSave => "LASTSAVE", - RedisCommandKind::LIndex => "LINDEX", - RedisCommandKind::LInsert => "LINSERT", - RedisCommandKind::LLen => "LLEN", - RedisCommandKind::LMove => "LMOVE", - RedisCommandKind::LPop => "LPOP", - RedisCommandKind::LPos => "LPOS", - RedisCommandKind::LPush => "LPUSH", - RedisCommandKind::LPushX => "LPUSHX", - RedisCommandKind::LRange => "LRANGE", - RedisCommandKind::LMPop => "LMPOP", - RedisCommandKind::LRem => "LREM", - RedisCommandKind::LSet => "LSET", - RedisCommandKind::LTrim => "LTRIM", - RedisCommandKind::Lcs => "LCS", - RedisCommandKind::MemoryDoctor => "MEMORY", - RedisCommandKind::MemoryHelp => "MEMORY", - RedisCommandKind::MemoryMallocStats => "MEMORY", - RedisCommandKind::MemoryPurge => "MEMORY", 
- RedisCommandKind::MemoryStats => "MEMORY", - RedisCommandKind::MemoryUsage => "MEMORY", - RedisCommandKind::Mget => "MGET", - RedisCommandKind::Migrate => "MIGRATE", - RedisCommandKind::Monitor => "MONITOR", - RedisCommandKind::Move => "MOVE", - RedisCommandKind::Mset => "MSET", - RedisCommandKind::Msetnx => "MSETNX", - RedisCommandKind::Multi => "MULTI", - RedisCommandKind::Object => "OBJECT", - RedisCommandKind::Persist => "PERSIST", - RedisCommandKind::Pexpire => "PEXPIRE", - RedisCommandKind::Pexpireat => "PEXPIREAT", - RedisCommandKind::PexpireTime => "PEXPIRETIME", - RedisCommandKind::Pfadd => "PFADD", - RedisCommandKind::Pfcount => "PFCOUNT", - RedisCommandKind::Pfmerge => "PFMERGE", - RedisCommandKind::Ping => "PING", - RedisCommandKind::Psetex => "PSETEX", - RedisCommandKind::Psubscribe => "PSUBSCRIBE", - RedisCommandKind::Pttl => "PTTL", - RedisCommandKind::Publish => "PUBLISH", - RedisCommandKind::Punsubscribe => "PUNSUBSCRIBE", - RedisCommandKind::Quit => "QUIT", - RedisCommandKind::Randomkey => "RANDOMKEY", - RedisCommandKind::Readonly => "READONLY", - RedisCommandKind::Readwrite => "READWRITE", - RedisCommandKind::Rename => "RENAME", - RedisCommandKind::Renamenx => "RENAMENX", - RedisCommandKind::Restore => "RESTORE", - RedisCommandKind::Role => "ROLE", - RedisCommandKind::Rpop => "RPOP", - RedisCommandKind::Rpoplpush => "RPOPLPUSH", - RedisCommandKind::Rpush => "RPUSH", - RedisCommandKind::Rpushx => "RPUSHX", - RedisCommandKind::Sadd => "SADD", - RedisCommandKind::Save => "SAVE", - RedisCommandKind::Scard => "SCARD", - RedisCommandKind::Sdiff => "SDIFF", - RedisCommandKind::Sdiffstore => "SDIFFSTORE", - RedisCommandKind::Select => "SELECT", - RedisCommandKind::Sentinel => "SENTINEL", - RedisCommandKind::Set => "SET", - RedisCommandKind::Setbit => "SETBIT", - RedisCommandKind::Setex => "SETEX", - RedisCommandKind::Setnx => "SETNX", - RedisCommandKind::Setrange => "SETRANGE", - RedisCommandKind::Shutdown => "SHUTDOWN", - RedisCommandKind::Sinter => 
"SINTER", - RedisCommandKind::Sinterstore => "SINTERSTORE", - RedisCommandKind::Sismember => "SISMEMBER", - RedisCommandKind::Replicaof => "REPLICAOF", - RedisCommandKind::Slowlog => "SLOWLOG", - RedisCommandKind::Smembers => "SMEMBERS", - RedisCommandKind::Smismember => "SMISMEMBER", - RedisCommandKind::Smove => "SMOVE", - RedisCommandKind::Sort => "SORT", - RedisCommandKind::SortRo => "SORT_RO", - RedisCommandKind::Spop => "SPOP", - RedisCommandKind::Srandmember => "SRANDMEMBER", - RedisCommandKind::Srem => "SREM", - RedisCommandKind::Strlen => "STRLEN", - RedisCommandKind::Subscribe => "SUBSCRIBE", - RedisCommandKind::Sunion => "SUNION", - RedisCommandKind::Sunionstore => "SUNIONSTORE", - RedisCommandKind::Swapdb => "SWAPDB", - RedisCommandKind::Sync => "SYNC", - RedisCommandKind::Time => "TIME", - RedisCommandKind::Touch => "TOUCH", - RedisCommandKind::Ttl => "TTL", - RedisCommandKind::Type => "TYPE", - RedisCommandKind::Unsubscribe => "UNSUBSCRIBE", - RedisCommandKind::Unlink => "UNLINK", - RedisCommandKind::Unwatch => "UNWATCH", - RedisCommandKind::Wait => "WAIT", - RedisCommandKind::Watch => "WATCH", - RedisCommandKind::XinfoConsumers | RedisCommandKind::XinfoGroups | RedisCommandKind::XinfoStream => "XINFO", - RedisCommandKind::Xadd => "XADD", - RedisCommandKind::Xtrim => "XTRIM", - RedisCommandKind::Xdel => "XDEL", - RedisCommandKind::Xrange => "XRANGE", - RedisCommandKind::Xrevrange => "XREVRANGE", - RedisCommandKind::Xlen => "XLEN", - RedisCommandKind::Xread => "XREAD", - RedisCommandKind::Xgroupcreate - | RedisCommandKind::XgroupCreateConsumer - | RedisCommandKind::XgroupDelConsumer - | RedisCommandKind::XgroupDestroy - | RedisCommandKind::XgroupSetId => "XGROUP", - RedisCommandKind::Xreadgroup => "XREADGROUP", - RedisCommandKind::Xack => "XACK", - RedisCommandKind::Xclaim => "XCLAIM", - RedisCommandKind::Xautoclaim => "XAUTOCLAIM", - RedisCommandKind::Xpending => "XPENDING", - RedisCommandKind::Zadd => "ZADD", - RedisCommandKind::Zcard => "ZCARD", - 
RedisCommandKind::Zcount => "ZCOUNT", - RedisCommandKind::Zdiff => "ZDIFF", - RedisCommandKind::Zdiffstore => "ZDIFFSTORE", - RedisCommandKind::Zincrby => "ZINCRBY", - RedisCommandKind::Zinter => "ZINTER", - RedisCommandKind::Zinterstore => "ZINTERSTORE", - RedisCommandKind::Zlexcount => "ZLEXCOUNT", - RedisCommandKind::Zrandmember => "ZRANDMEMBER", - RedisCommandKind::Zrange => "ZRANGE", - RedisCommandKind::Zrangestore => "ZRANGESTORE", - RedisCommandKind::Zrangebylex => "ZRANGEBYLEX", - RedisCommandKind::Zrangebyscore => "ZRANGEBYSCORE", - RedisCommandKind::Zrank => "ZRANK", - RedisCommandKind::Zrem => "ZREM", - RedisCommandKind::Zremrangebylex => "ZREMRANGEBYLEX", - RedisCommandKind::Zremrangebyrank => "ZREMRANGEBYRANK", - RedisCommandKind::Zremrangebyscore => "ZREMRANGEBYSCORE", - RedisCommandKind::Zrevrange => "ZREVRANGE", - RedisCommandKind::Zrevrangebylex => "ZREVRANGEBYLEX", - RedisCommandKind::Zrevrangebyscore => "ZREVRANGEBYSCORE", - RedisCommandKind::Zrevrank => "ZREVRANK", - RedisCommandKind::Zscore => "ZSCORE", - RedisCommandKind::Zmscore => "ZMSCORE", - RedisCommandKind::Zunion => "ZUNION", - RedisCommandKind::Zunionstore => "ZUNIONSTORE", - RedisCommandKind::Zpopmax => "ZPOPMAX", - RedisCommandKind::Zpopmin => "ZPOPMIN", - RedisCommandKind::Zmpop => "ZMPOP", - RedisCommandKind::ScriptDebug - | RedisCommandKind::ScriptExists - | RedisCommandKind::ScriptFlush - | RedisCommandKind::ScriptKill - | RedisCommandKind::ScriptLoad - | RedisCommandKind::_ScriptFlushCluster - | RedisCommandKind::_ScriptKillCluster - | RedisCommandKind::_ScriptLoadCluster => "SCRIPT", - RedisCommandKind::Spublish => "SPUBLISH", - RedisCommandKind::Ssubscribe => "SSUBSCRIBE", - RedisCommandKind::Sunsubscribe => "SUNSUBSCRIBE", - RedisCommandKind::Scan => "SCAN", - RedisCommandKind::Sscan => "SSCAN", - RedisCommandKind::Hscan => "HSCAN", - RedisCommandKind::Zscan => "ZSCAN", - RedisCommandKind::Fcall => "FCALL", - RedisCommandKind::FcallRO => "FCALL_RO", - 
RedisCommandKind::FunctionDelete - | RedisCommandKind::FunctionDump - | RedisCommandKind::FunctionFlush - | RedisCommandKind::FunctionKill - | RedisCommandKind::FunctionList - | RedisCommandKind::FunctionLoad - | RedisCommandKind::FunctionRestore - | RedisCommandKind::FunctionStats - | RedisCommandKind::_FunctionFlushCluster - | RedisCommandKind::_FunctionRestoreCluster - | RedisCommandKind::_FunctionDeleteCluster - | RedisCommandKind::_FunctionLoadCluster => "FUNCTION", - RedisCommandKind::PubsubChannels - | RedisCommandKind::PubsubNumpat - | RedisCommandKind::PubsubNumsub - | RedisCommandKind::PubsubShardchannels - | RedisCommandKind::PubsubShardnumsub => "PUBSUB", - RedisCommandKind::_AuthAllCluster => "AUTH", - RedisCommandKind::_HelloAllCluster(_) => "HELLO", - RedisCommandKind::_ClientTrackingCluster => "CLIENT", - RedisCommandKind::JsonArrAppend => "JSON.ARRAPPEND", - RedisCommandKind::JsonArrIndex => "JSON.ARRINDEX", - RedisCommandKind::JsonArrInsert => "JSON.ARRINSERT", - RedisCommandKind::JsonArrLen => "JSON.ARRLEN", - RedisCommandKind::JsonArrPop => "JSON.ARRPOP", - RedisCommandKind::JsonArrTrim => "JSON.ARRTRIM", - RedisCommandKind::JsonClear => "JSON.CLEAR", - RedisCommandKind::JsonDebugMemory => "JSON.DEBUG", - RedisCommandKind::JsonDel => "JSON.DEL", - RedisCommandKind::JsonGet => "JSON.GET", - RedisCommandKind::JsonMerge => "JSON.MERGE", - RedisCommandKind::JsonMGet => "JSON.MGET", - RedisCommandKind::JsonMSet => "JSON.MSET", - RedisCommandKind::JsonNumIncrBy => "JSON.NUMINCRBY", - RedisCommandKind::JsonObjKeys => "JSON.OBJKEYS", - RedisCommandKind::JsonObjLen => "JSON.OBJLEN", - RedisCommandKind::JsonResp => "JSON.RESP", - RedisCommandKind::JsonSet => "JSON.SET", - RedisCommandKind::JsonStrAppend => "JSON.STRAPPEND", - RedisCommandKind::JsonStrLen => "JSON.STRLEN", - RedisCommandKind::JsonToggle => "JSON.TOGGLE", - RedisCommandKind::JsonType => "JSON.TYPE", - RedisCommandKind::TsAdd => "TS.ADD", - RedisCommandKind::TsAlter => "TS.ALTER", - 
RedisCommandKind::TsCreate => "TS.CREATE", - RedisCommandKind::TsCreateRule => "TS.CREATERULE", - RedisCommandKind::TsDecrBy => "TS.DECRBY", - RedisCommandKind::TsDel => "TS.DEL", - RedisCommandKind::TsDeleteRule => "TS.DELETERULE", - RedisCommandKind::TsGet => "TS.GET", - RedisCommandKind::TsIncrBy => "TS.INCRBY", - RedisCommandKind::TsInfo => "TS.INFO", - RedisCommandKind::TsMAdd => "TS.MADD", - RedisCommandKind::TsMGet => "TS.MGET", - RedisCommandKind::TsMRange => "TS.MRANGE", - RedisCommandKind::TsMRevRange => "TS.MREVRANGE", - RedisCommandKind::TsQueryIndex => "TS.QUERYINDEX", - RedisCommandKind::TsRange => "TS.RANGE", - RedisCommandKind::TsRevRange => "TS.REVRANGE", - RedisCommandKind::FtList => "FT._LIST", - RedisCommandKind::FtAggregate => "FT.AGGREGATE", - RedisCommandKind::FtSearch => "FT.SEARCH", - RedisCommandKind::FtCreate => "FT.CREATE", - RedisCommandKind::FtAlter => "FT.ALTER", - RedisCommandKind::FtAliasAdd => "FT.ALIASADD", - RedisCommandKind::FtAliasDel => "FT.ALIASDEL", - RedisCommandKind::FtAliasUpdate => "FT.ALIASUPDATE", - RedisCommandKind::FtConfigGet => "FT.CONFIG", - RedisCommandKind::FtConfigSet => "FT.CONFIG", - RedisCommandKind::FtCursorDel => "FT.CURSOR", - RedisCommandKind::FtCursorRead => "FT.CURSOR", - RedisCommandKind::FtDictAdd => "FT.DICTADD", - RedisCommandKind::FtDictDel => "FT.DICTDEL", - RedisCommandKind::FtDictDump => "FT.DICTDUMP", - RedisCommandKind::FtDropIndex => "FT.DROPINDEX", - RedisCommandKind::FtExplain => "FT.EXPLAIN", - RedisCommandKind::FtInfo => "FT.INFO", - RedisCommandKind::FtSpellCheck => "FT.SPELLCHECK", - RedisCommandKind::FtSugAdd => "FT.SUGADD", - RedisCommandKind::FtSugDel => "FT.SUGDEL", - RedisCommandKind::FtSugGet => "FT.SUGGET", - RedisCommandKind::FtSugLen => "FT.SUGLEN", - RedisCommandKind::FtSynDump => "FT.SYNDUMP", - RedisCommandKind::FtSynUpdate => "FT.SYNUPDATE", - RedisCommandKind::FtTagVals => "FT.TAGVALS", - RedisCommandKind::_Custom(ref kind) => return kind.cmd.clone(), + 
CommandKind::AclLoad + | CommandKind::AclSave + | CommandKind::AclList + | CommandKind::AclUsers + | CommandKind::AclGetUser + | CommandKind::AclSetUser + | CommandKind::AclDelUser + | CommandKind::AclCat + | CommandKind::AclGenPass + | CommandKind::AclWhoAmI + | CommandKind::AclLog + | CommandKind::AclHelp => "ACL", + CommandKind::Append => "APPEND", + CommandKind::Auth => "AUTH", + CommandKind::Asking => "ASKING", + CommandKind::BgreWriteAof => "BGREWRITEAOF", + CommandKind::BgSave => "BGSAVE", + CommandKind::BitCount => "BITCOUNT", + CommandKind::BitField => "BITFIELD", + CommandKind::BitOp => "BITOP", + CommandKind::BitPos => "BITPOS", + CommandKind::BlPop => "BLPOP", + CommandKind::BlMove => "BLMOVE", + CommandKind::BrPop => "BRPOP", + CommandKind::BrPopLPush => "BRPOPLPUSH", + CommandKind::BzPopMin => "BZPOPMIN", + CommandKind::BzPopMax => "BZPOPMAX", + CommandKind::BzmPop => "BZMPOP", + CommandKind::BlmPop => "BLMPOP", + CommandKind::ClientID + | CommandKind::ClientInfo + | CommandKind::ClientKill + | CommandKind::ClientList + | CommandKind::ClientGetName + | CommandKind::ClientPause + | CommandKind::ClientUnpause + | CommandKind::ClientUnblock + | CommandKind::ClientReply + | CommandKind::ClientSetname + | CommandKind::ClientCaching + | CommandKind::ClientTrackingInfo + | CommandKind::ClientTracking + | CommandKind::ClientGetRedir => "CLIENT", + CommandKind::ClusterAddSlots + | CommandKind::ClusterCountFailureReports + | CommandKind::ClusterCountKeysInSlot + | CommandKind::ClusterDelSlots + | CommandKind::ClusterFailOver + | CommandKind::ClusterForget + | CommandKind::ClusterGetKeysInSlot + | CommandKind::ClusterInfo + | CommandKind::ClusterKeySlot + | CommandKind::ClusterMeet + | CommandKind::ClusterNodes + | CommandKind::ClusterReplicate + | CommandKind::ClusterReset + | CommandKind::ClusterSaveConfig + | CommandKind::ClusterSetConfigEpoch + | CommandKind::ClusterSetSlot + | CommandKind::ClusterReplicas + | CommandKind::ClusterSlots + | 
CommandKind::ClusterBumpEpoch + | CommandKind::ClusterFlushSlots + | CommandKind::ClusterMyID => "CLUSTER", + CommandKind::ConfigGet | CommandKind::ConfigRewrite | CommandKind::ConfigSet | CommandKind::ConfigResetStat => { + "CONFIG" + }, + CommandKind::Copy => "COPY", + CommandKind::DBSize => "DBSIZE", + CommandKind::Decr => "DECR", + CommandKind::DecrBy => "DECRBY", + CommandKind::Del => "DEL", + CommandKind::Discard => "DISCARD", + CommandKind::Dump => "DUMP", + CommandKind::Echo => "ECHO", + CommandKind::Eval => "EVAL", + CommandKind::EvalSha => "EVALSHA", + CommandKind::Exec => "EXEC", + CommandKind::Exists => "EXISTS", + CommandKind::Expire => "EXPIRE", + CommandKind::ExpireAt => "EXPIREAT", + CommandKind::ExpireTime => "EXPIRETIME", + CommandKind::Failover => "FAILOVER", + CommandKind::FlushAll => "FLUSHALL", + CommandKind::_FlushAllCluster => "FLUSHALL", + CommandKind::FlushDB => "FLUSHDB", + CommandKind::GeoAdd => "GEOADD", + CommandKind::GeoHash => "GEOHASH", + CommandKind::GeoPos => "GEOPOS", + CommandKind::GeoDist => "GEODIST", + CommandKind::GeoRadius => "GEORADIUS", + CommandKind::GeoRadiusByMember => "GEORADIUSBYMEMBER", + CommandKind::GeoSearch => "GEOSEARCH", + CommandKind::GeoSearchStore => "GEOSEARCHSTORE", + CommandKind::Get => "GET", + CommandKind::GetDel => "GETDEL", + CommandKind::GetBit => "GETBIT", + CommandKind::GetRange => "GETRANGE", + CommandKind::GetSet => "GETSET", + CommandKind::HDel => "HDEL", + CommandKind::_Hello(_) => "HELLO", + CommandKind::HExists => "HEXISTS", + CommandKind::HGet => "HGET", + CommandKind::HGetAll => "HGETALL", + CommandKind::HIncrBy => "HINCRBY", + CommandKind::HIncrByFloat => "HINCRBYFLOAT", + CommandKind::HKeys => "HKEYS", + CommandKind::HLen => "HLEN", + CommandKind::HMGet => "HMGET", + CommandKind::HMSet => "HMSET", + CommandKind::HSet => "HSET", + CommandKind::HSetNx => "HSETNX", + CommandKind::HStrLen => "HSTRLEN", + CommandKind::HRandField => "HRANDFIELD", + CommandKind::HTtl => "HTTL", + 
CommandKind::HExpire => "HEXPIRE", + CommandKind::HExpireAt => "HEXPIREAT", + CommandKind::HExpireTime => "HEXPIRETIME", + CommandKind::HPersist => "HPERSIST", + CommandKind::HPTtl => "HPTTL", + CommandKind::HPExpire => "HPEXPIRE", + CommandKind::HPExpireAt => "HPEXPIREAT", + CommandKind::HPExpireTime => "HPEXPIRETIME", + CommandKind::HVals => "HVALS", + CommandKind::Incr => "INCR", + CommandKind::IncrBy => "INCRBY", + CommandKind::IncrByFloat => "INCRBYFLOAT", + CommandKind::Info => "INFO", + CommandKind::Keys => "KEYS", + CommandKind::LastSave => "LASTSAVE", + CommandKind::LIndex => "LINDEX", + CommandKind::LInsert => "LINSERT", + CommandKind::LLen => "LLEN", + CommandKind::LMove => "LMOVE", + CommandKind::LPop => "LPOP", + CommandKind::LPos => "LPOS", + CommandKind::LPush => "LPUSH", + CommandKind::LPushX => "LPUSHX", + CommandKind::LRange => "LRANGE", + CommandKind::LMPop => "LMPOP", + CommandKind::LRem => "LREM", + CommandKind::LSet => "LSET", + CommandKind::LTrim => "LTRIM", + CommandKind::Lcs => "LCS", + CommandKind::MemoryDoctor => "MEMORY", + CommandKind::MemoryHelp => "MEMORY", + CommandKind::MemoryMallocStats => "MEMORY", + CommandKind::MemoryPurge => "MEMORY", + CommandKind::MemoryStats => "MEMORY", + CommandKind::MemoryUsage => "MEMORY", + CommandKind::Mget => "MGET", + CommandKind::Migrate => "MIGRATE", + CommandKind::Monitor => "MONITOR", + CommandKind::Move => "MOVE", + CommandKind::Mset => "MSET", + CommandKind::Msetnx => "MSETNX", + CommandKind::Multi => "MULTI", + CommandKind::Object => "OBJECT", + CommandKind::Persist => "PERSIST", + CommandKind::Pexpire => "PEXPIRE", + CommandKind::Pexpireat => "PEXPIREAT", + CommandKind::PexpireTime => "PEXPIRETIME", + CommandKind::Pfadd => "PFADD", + CommandKind::Pfcount => "PFCOUNT", + CommandKind::Pfmerge => "PFMERGE", + CommandKind::Ping => "PING", + CommandKind::Psetex => "PSETEX", + CommandKind::Psubscribe => "PSUBSCRIBE", + CommandKind::Pttl => "PTTL", + CommandKind::Publish => "PUBLISH", + 
CommandKind::Punsubscribe => "PUNSUBSCRIBE", + CommandKind::Quit => "QUIT", + CommandKind::Randomkey => "RANDOMKEY", + CommandKind::Readonly => "READONLY", + CommandKind::Readwrite => "READWRITE", + CommandKind::Rename => "RENAME", + CommandKind::Renamenx => "RENAMENX", + CommandKind::Restore => "RESTORE", + CommandKind::Role => "ROLE", + CommandKind::Rpop => "RPOP", + CommandKind::Rpoplpush => "RPOPLPUSH", + CommandKind::Rpush => "RPUSH", + CommandKind::Rpushx => "RPUSHX", + CommandKind::Sadd => "SADD", + CommandKind::Save => "SAVE", + CommandKind::Scard => "SCARD", + CommandKind::Sdiff => "SDIFF", + CommandKind::Sdiffstore => "SDIFFSTORE", + CommandKind::Select => "SELECT", + CommandKind::Sentinel => "SENTINEL", + CommandKind::Set => "SET", + CommandKind::Setbit => "SETBIT", + CommandKind::Setex => "SETEX", + CommandKind::Setnx => "SETNX", + CommandKind::Setrange => "SETRANGE", + CommandKind::Shutdown => "SHUTDOWN", + CommandKind::Sinter => "SINTER", + CommandKind::Sinterstore => "SINTERSTORE", + CommandKind::Sismember => "SISMEMBER", + CommandKind::Replicaof => "REPLICAOF", + CommandKind::Slowlog => "SLOWLOG", + CommandKind::Smembers => "SMEMBERS", + CommandKind::Smismember => "SMISMEMBER", + CommandKind::Smove => "SMOVE", + CommandKind::Sort => "SORT", + CommandKind::SortRo => "SORT_RO", + CommandKind::Spop => "SPOP", + CommandKind::Srandmember => "SRANDMEMBER", + CommandKind::Srem => "SREM", + CommandKind::Strlen => "STRLEN", + CommandKind::Subscribe => "SUBSCRIBE", + CommandKind::Sunion => "SUNION", + CommandKind::Sunionstore => "SUNIONSTORE", + CommandKind::Swapdb => "SWAPDB", + CommandKind::Sync => "SYNC", + CommandKind::Time => "TIME", + CommandKind::Touch => "TOUCH", + CommandKind::Ttl => "TTL", + CommandKind::Type => "TYPE", + CommandKind::Unsubscribe => "UNSUBSCRIBE", + CommandKind::Unlink => "UNLINK", + CommandKind::Unwatch => "UNWATCH", + CommandKind::Wait => "WAIT", + CommandKind::Watch => "WATCH", + CommandKind::XinfoConsumers | 
CommandKind::XinfoGroups | CommandKind::XinfoStream => "XINFO", + CommandKind::Xadd => "XADD", + CommandKind::Xtrim => "XTRIM", + CommandKind::Xdel => "XDEL", + CommandKind::Xrange => "XRANGE", + CommandKind::Xrevrange => "XREVRANGE", + CommandKind::Xlen => "XLEN", + CommandKind::Xread => "XREAD", + CommandKind::Xgroupcreate + | CommandKind::XgroupCreateConsumer + | CommandKind::XgroupDelConsumer + | CommandKind::XgroupDestroy + | CommandKind::XgroupSetId => "XGROUP", + CommandKind::Xreadgroup => "XREADGROUP", + CommandKind::Xack => "XACK", + CommandKind::Xclaim => "XCLAIM", + CommandKind::Xautoclaim => "XAUTOCLAIM", + CommandKind::Xpending => "XPENDING", + CommandKind::Zadd => "ZADD", + CommandKind::Zcard => "ZCARD", + CommandKind::Zcount => "ZCOUNT", + CommandKind::Zdiff => "ZDIFF", + CommandKind::Zdiffstore => "ZDIFFSTORE", + CommandKind::Zincrby => "ZINCRBY", + CommandKind::Zinter => "ZINTER", + CommandKind::Zinterstore => "ZINTERSTORE", + CommandKind::Zlexcount => "ZLEXCOUNT", + CommandKind::Zrandmember => "ZRANDMEMBER", + CommandKind::Zrange => "ZRANGE", + CommandKind::Zrangestore => "ZRANGESTORE", + CommandKind::Zrangebylex => "ZRANGEBYLEX", + CommandKind::Zrangebyscore => "ZRANGEBYSCORE", + CommandKind::Zrank => "ZRANK", + CommandKind::Zrem => "ZREM", + CommandKind::Zremrangebylex => "ZREMRANGEBYLEX", + CommandKind::Zremrangebyrank => "ZREMRANGEBYRANK", + CommandKind::Zremrangebyscore => "ZREMRANGEBYSCORE", + CommandKind::Zrevrange => "ZREVRANGE", + CommandKind::Zrevrangebylex => "ZREVRANGEBYLEX", + CommandKind::Zrevrangebyscore => "ZREVRANGEBYSCORE", + CommandKind::Zrevrank => "ZREVRANK", + CommandKind::Zscore => "ZSCORE", + CommandKind::Zmscore => "ZMSCORE", + CommandKind::Zunion => "ZUNION", + CommandKind::Zunionstore => "ZUNIONSTORE", + CommandKind::Zpopmax => "ZPOPMAX", + CommandKind::Zpopmin => "ZPOPMIN", + CommandKind::Zmpop => "ZMPOP", + CommandKind::ScriptDebug + | CommandKind::ScriptExists + | CommandKind::ScriptFlush + | CommandKind::ScriptKill + 
| CommandKind::ScriptLoad + | CommandKind::_ScriptFlushCluster + | CommandKind::_ScriptKillCluster + | CommandKind::_ScriptLoadCluster => "SCRIPT", + CommandKind::Spublish => "SPUBLISH", + CommandKind::Ssubscribe => "SSUBSCRIBE", + CommandKind::Sunsubscribe => "SUNSUBSCRIBE", + CommandKind::Scan => "SCAN", + CommandKind::Sscan => "SSCAN", + CommandKind::Hscan => "HSCAN", + CommandKind::Zscan => "ZSCAN", + CommandKind::Fcall => "FCALL", + CommandKind::FcallRO => "FCALL_RO", + CommandKind::FunctionDelete + | CommandKind::FunctionDump + | CommandKind::FunctionFlush + | CommandKind::FunctionKill + | CommandKind::FunctionList + | CommandKind::FunctionLoad + | CommandKind::FunctionRestore + | CommandKind::FunctionStats + | CommandKind::_FunctionFlushCluster + | CommandKind::_FunctionRestoreCluster + | CommandKind::_FunctionDeleteCluster + | CommandKind::_FunctionLoadCluster => "FUNCTION", + CommandKind::PubsubChannels + | CommandKind::PubsubNumpat + | CommandKind::PubsubNumsub + | CommandKind::PubsubShardchannels + | CommandKind::PubsubShardnumsub => "PUBSUB", + CommandKind::_AuthAllCluster => "AUTH", + CommandKind::_HelloAllCluster(_) => "HELLO", + CommandKind::_ClientTrackingCluster => "CLIENT", + CommandKind::JsonArrAppend => "JSON.ARRAPPEND", + CommandKind::JsonArrIndex => "JSON.ARRINDEX", + CommandKind::JsonArrInsert => "JSON.ARRINSERT", + CommandKind::JsonArrLen => "JSON.ARRLEN", + CommandKind::JsonArrPop => "JSON.ARRPOP", + CommandKind::JsonArrTrim => "JSON.ARRTRIM", + CommandKind::JsonClear => "JSON.CLEAR", + CommandKind::JsonDebugMemory => "JSON.DEBUG", + CommandKind::JsonDel => "JSON.DEL", + CommandKind::JsonGet => "JSON.GET", + CommandKind::JsonMerge => "JSON.MERGE", + CommandKind::JsonMGet => "JSON.MGET", + CommandKind::JsonMSet => "JSON.MSET", + CommandKind::JsonNumIncrBy => "JSON.NUMINCRBY", + CommandKind::JsonObjKeys => "JSON.OBJKEYS", + CommandKind::JsonObjLen => "JSON.OBJLEN", + CommandKind::JsonResp => "JSON.RESP", + CommandKind::JsonSet => "JSON.SET", 
+ CommandKind::JsonStrAppend => "JSON.STRAPPEND", + CommandKind::JsonStrLen => "JSON.STRLEN", + CommandKind::JsonToggle => "JSON.TOGGLE", + CommandKind::JsonType => "JSON.TYPE", + CommandKind::TsAdd => "TS.ADD", + CommandKind::TsAlter => "TS.ALTER", + CommandKind::TsCreate => "TS.CREATE", + CommandKind::TsCreateRule => "TS.CREATERULE", + CommandKind::TsDecrBy => "TS.DECRBY", + CommandKind::TsDel => "TS.DEL", + CommandKind::TsDeleteRule => "TS.DELETERULE", + CommandKind::TsGet => "TS.GET", + CommandKind::TsIncrBy => "TS.INCRBY", + CommandKind::TsInfo => "TS.INFO", + CommandKind::TsMAdd => "TS.MADD", + CommandKind::TsMGet => "TS.MGET", + CommandKind::TsMRange => "TS.MRANGE", + CommandKind::TsMRevRange => "TS.MREVRANGE", + CommandKind::TsQueryIndex => "TS.QUERYINDEX", + CommandKind::TsRange => "TS.RANGE", + CommandKind::TsRevRange => "TS.REVRANGE", + CommandKind::FtList => "FT._LIST", + CommandKind::FtAggregate => "FT.AGGREGATE", + CommandKind::FtSearch => "FT.SEARCH", + CommandKind::FtCreate => "FT.CREATE", + CommandKind::FtAlter => "FT.ALTER", + CommandKind::FtAliasAdd => "FT.ALIASADD", + CommandKind::FtAliasDel => "FT.ALIASDEL", + CommandKind::FtAliasUpdate => "FT.ALIASUPDATE", + CommandKind::FtConfigGet => "FT.CONFIG", + CommandKind::FtConfigSet => "FT.CONFIG", + CommandKind::FtCursorDel => "FT.CURSOR", + CommandKind::FtCursorRead => "FT.CURSOR", + CommandKind::FtDictAdd => "FT.DICTADD", + CommandKind::FtDictDel => "FT.DICTDEL", + CommandKind::FtDictDump => "FT.DICTDUMP", + CommandKind::FtDropIndex => "FT.DROPINDEX", + CommandKind::FtExplain => "FT.EXPLAIN", + CommandKind::FtInfo => "FT.INFO", + CommandKind::FtSpellCheck => "FT.SPELLCHECK", + CommandKind::FtSugAdd => "FT.SUGADD", + CommandKind::FtSugDel => "FT.SUGDEL", + CommandKind::FtSugGet => "FT.SUGGET", + CommandKind::FtSugLen => "FT.SUGLEN", + CommandKind::FtSynDump => "FT.SYNDUMP", + CommandKind::FtSynUpdate => "FT.SYNUPDATE", + CommandKind::FtTagVals => "FT.TAGVALS", + CommandKind::_Custom(ref kind) => 
return kind.cmd.clone(), }; client_utils::static_str(s) @@ -1358,102 +1309,102 @@ impl RedisCommandKind { /// Read the optional subcommand string for a command. pub fn subcommand_str(&self) -> Option { let s = match *self { - RedisCommandKind::ScriptDebug => "DEBUG", - RedisCommandKind::ScriptLoad => "LOAD", - RedisCommandKind::ScriptKill => "KILL", - RedisCommandKind::ScriptFlush => "FLUSH", - RedisCommandKind::ScriptExists => "EXISTS", - RedisCommandKind::_ScriptFlushCluster => "FLUSH", - RedisCommandKind::_ScriptLoadCluster => "LOAD", - RedisCommandKind::_ScriptKillCluster => "KILL", - RedisCommandKind::AclLoad => "LOAD", - RedisCommandKind::AclSave => "SAVE", - RedisCommandKind::AclList => "LIST", - RedisCommandKind::AclUsers => "USERS", - RedisCommandKind::AclGetUser => "GETUSER", - RedisCommandKind::AclSetUser => "SETUSER", - RedisCommandKind::AclDelUser => "DELUSER", - RedisCommandKind::AclCat => "CAT", - RedisCommandKind::AclGenPass => "GENPASS", - RedisCommandKind::AclWhoAmI => "WHOAMI", - RedisCommandKind::AclLog => "LOG", - RedisCommandKind::AclHelp => "HELP", - RedisCommandKind::ClusterAddSlots => "ADDSLOTS", - RedisCommandKind::ClusterCountFailureReports => "COUNT-FAILURE-REPORTS", - RedisCommandKind::ClusterCountKeysInSlot => "COUNTKEYSINSLOT", - RedisCommandKind::ClusterDelSlots => "DELSLOTS", - RedisCommandKind::ClusterFailOver => "FAILOVER", - RedisCommandKind::ClusterForget => "FORGET", - RedisCommandKind::ClusterGetKeysInSlot => "GETKEYSINSLOT", - RedisCommandKind::ClusterInfo => "INFO", - RedisCommandKind::ClusterKeySlot => "KEYSLOT", - RedisCommandKind::ClusterMeet => "MEET", - RedisCommandKind::ClusterNodes => "NODES", - RedisCommandKind::ClusterReplicate => "REPLICATE", - RedisCommandKind::ClusterReset => "RESET", - RedisCommandKind::ClusterSaveConfig => "SAVECONFIG", - RedisCommandKind::ClusterSetConfigEpoch => "SET-CONFIG-EPOCH", - RedisCommandKind::ClusterSetSlot => "SETSLOT", - RedisCommandKind::ClusterReplicas => "REPLICAS", - 
RedisCommandKind::ClusterSlots => "SLOTS", - RedisCommandKind::ClusterBumpEpoch => "BUMPEPOCH", - RedisCommandKind::ClusterFlushSlots => "FLUSHSLOTS", - RedisCommandKind::ClusterMyID => "MYID", - RedisCommandKind::ClientID => "ID", - RedisCommandKind::ClientInfo => "INFO", - RedisCommandKind::ClientKill => "KILL", - RedisCommandKind::ClientList => "LIST", - RedisCommandKind::ClientGetName => "GETNAME", - RedisCommandKind::ClientPause => "PAUSE", - RedisCommandKind::ClientUnpause => "UNPAUSE", - RedisCommandKind::ClientUnblock => "UNBLOCK", - RedisCommandKind::ClientReply => "REPLY", - RedisCommandKind::ClientSetname => "SETNAME", - RedisCommandKind::ConfigGet => "GET", - RedisCommandKind::ConfigRewrite => "REWRITE", - RedisCommandKind::ClientGetRedir => "GETREDIR", - RedisCommandKind::ClientTracking => "TRACKING", - RedisCommandKind::ClientTrackingInfo => "TRACKINGINFO", - RedisCommandKind::ClientCaching => "CACHING", - RedisCommandKind::ConfigSet => "SET", - RedisCommandKind::ConfigResetStat => "RESETSTAT", - RedisCommandKind::MemoryDoctor => "DOCTOR", - RedisCommandKind::MemoryHelp => "HELP", - RedisCommandKind::MemoryUsage => "USAGE", - RedisCommandKind::MemoryMallocStats => "MALLOC-STATS", - RedisCommandKind::MemoryStats => "STATS", - RedisCommandKind::MemoryPurge => "PURGE", - RedisCommandKind::XinfoConsumers => "CONSUMERS", - RedisCommandKind::XinfoGroups => "GROUPS", - RedisCommandKind::XinfoStream => "STREAM", - RedisCommandKind::Xgroupcreate => "CREATE", - RedisCommandKind::XgroupCreateConsumer => "CREATECONSUMER", - RedisCommandKind::XgroupDelConsumer => "DELCONSUMER", - RedisCommandKind::XgroupDestroy => "DESTROY", - RedisCommandKind::XgroupSetId => "SETID", - RedisCommandKind::FunctionDelete => "DELETE", - RedisCommandKind::FunctionDump => "DUMP", - RedisCommandKind::FunctionFlush => "FLUSH", - RedisCommandKind::FunctionKill => "KILL", - RedisCommandKind::FunctionList => "LIST", - RedisCommandKind::FunctionLoad => "LOAD", - 
RedisCommandKind::FunctionRestore => "RESTORE", - RedisCommandKind::FunctionStats => "STATS", - RedisCommandKind::PubsubChannels => "CHANNELS", - RedisCommandKind::PubsubNumpat => "NUMPAT", - RedisCommandKind::PubsubNumsub => "NUMSUB", - RedisCommandKind::PubsubShardchannels => "SHARDCHANNELS", - RedisCommandKind::PubsubShardnumsub => "SHARDNUMSUB", - RedisCommandKind::_FunctionLoadCluster => "LOAD", - RedisCommandKind::_FunctionFlushCluster => "FLUSH", - RedisCommandKind::_FunctionDeleteCluster => "DELETE", - RedisCommandKind::_FunctionRestoreCluster => "RESTORE", - RedisCommandKind::_ClientTrackingCluster => "TRACKING", - RedisCommandKind::JsonDebugMemory => "MEMORY", - RedisCommandKind::FtConfigGet => "GET", - RedisCommandKind::FtConfigSet => "SET", - RedisCommandKind::FtCursorDel => "DEL", - RedisCommandKind::FtCursorRead => "READ", + CommandKind::ScriptDebug => "DEBUG", + CommandKind::ScriptLoad => "LOAD", + CommandKind::ScriptKill => "KILL", + CommandKind::ScriptFlush => "FLUSH", + CommandKind::ScriptExists => "EXISTS", + CommandKind::_ScriptFlushCluster => "FLUSH", + CommandKind::_ScriptLoadCluster => "LOAD", + CommandKind::_ScriptKillCluster => "KILL", + CommandKind::AclLoad => "LOAD", + CommandKind::AclSave => "SAVE", + CommandKind::AclList => "LIST", + CommandKind::AclUsers => "USERS", + CommandKind::AclGetUser => "GETUSER", + CommandKind::AclSetUser => "SETUSER", + CommandKind::AclDelUser => "DELUSER", + CommandKind::AclCat => "CAT", + CommandKind::AclGenPass => "GENPASS", + CommandKind::AclWhoAmI => "WHOAMI", + CommandKind::AclLog => "LOG", + CommandKind::AclHelp => "HELP", + CommandKind::ClusterAddSlots => "ADDSLOTS", + CommandKind::ClusterCountFailureReports => "COUNT-FAILURE-REPORTS", + CommandKind::ClusterCountKeysInSlot => "COUNTKEYSINSLOT", + CommandKind::ClusterDelSlots => "DELSLOTS", + CommandKind::ClusterFailOver => "FAILOVER", + CommandKind::ClusterForget => "FORGET", + CommandKind::ClusterGetKeysInSlot => "GETKEYSINSLOT", + 
CommandKind::ClusterInfo => "INFO", + CommandKind::ClusterKeySlot => "KEYSLOT", + CommandKind::ClusterMeet => "MEET", + CommandKind::ClusterNodes => "NODES", + CommandKind::ClusterReplicate => "REPLICATE", + CommandKind::ClusterReset => "RESET", + CommandKind::ClusterSaveConfig => "SAVECONFIG", + CommandKind::ClusterSetConfigEpoch => "SET-CONFIG-EPOCH", + CommandKind::ClusterSetSlot => "SETSLOT", + CommandKind::ClusterReplicas => "REPLICAS", + CommandKind::ClusterSlots => "SLOTS", + CommandKind::ClusterBumpEpoch => "BUMPEPOCH", + CommandKind::ClusterFlushSlots => "FLUSHSLOTS", + CommandKind::ClusterMyID => "MYID", + CommandKind::ClientID => "ID", + CommandKind::ClientInfo => "INFO", + CommandKind::ClientKill => "KILL", + CommandKind::ClientList => "LIST", + CommandKind::ClientGetName => "GETNAME", + CommandKind::ClientPause => "PAUSE", + CommandKind::ClientUnpause => "UNPAUSE", + CommandKind::ClientUnblock => "UNBLOCK", + CommandKind::ClientReply => "REPLY", + CommandKind::ClientSetname => "SETNAME", + CommandKind::ConfigGet => "GET", + CommandKind::ConfigRewrite => "REWRITE", + CommandKind::ClientGetRedir => "GETREDIR", + CommandKind::ClientTracking => "TRACKING", + CommandKind::ClientTrackingInfo => "TRACKINGINFO", + CommandKind::ClientCaching => "CACHING", + CommandKind::ConfigSet => "SET", + CommandKind::ConfigResetStat => "RESETSTAT", + CommandKind::MemoryDoctor => "DOCTOR", + CommandKind::MemoryHelp => "HELP", + CommandKind::MemoryUsage => "USAGE", + CommandKind::MemoryMallocStats => "MALLOC-STATS", + CommandKind::MemoryStats => "STATS", + CommandKind::MemoryPurge => "PURGE", + CommandKind::XinfoConsumers => "CONSUMERS", + CommandKind::XinfoGroups => "GROUPS", + CommandKind::XinfoStream => "STREAM", + CommandKind::Xgroupcreate => "CREATE", + CommandKind::XgroupCreateConsumer => "CREATECONSUMER", + CommandKind::XgroupDelConsumer => "DELCONSUMER", + CommandKind::XgroupDestroy => "DESTROY", + CommandKind::XgroupSetId => "SETID", + CommandKind::FunctionDelete => 
"DELETE", + CommandKind::FunctionDump => "DUMP", + CommandKind::FunctionFlush => "FLUSH", + CommandKind::FunctionKill => "KILL", + CommandKind::FunctionList => "LIST", + CommandKind::FunctionLoad => "LOAD", + CommandKind::FunctionRestore => "RESTORE", + CommandKind::FunctionStats => "STATS", + CommandKind::PubsubChannels => "CHANNELS", + CommandKind::PubsubNumpat => "NUMPAT", + CommandKind::PubsubNumsub => "NUMSUB", + CommandKind::PubsubShardchannels => "SHARDCHANNELS", + CommandKind::PubsubShardnumsub => "SHARDNUMSUB", + CommandKind::_FunctionLoadCluster => "LOAD", + CommandKind::_FunctionFlushCluster => "FLUSH", + CommandKind::_FunctionDeleteCluster => "DELETE", + CommandKind::_FunctionRestoreCluster => "RESTORE", + CommandKind::_ClientTrackingCluster => "TRACKING", + CommandKind::JsonDebugMemory => "MEMORY", + CommandKind::FtConfigGet => "GET", + CommandKind::FtConfigSet => "SET", + CommandKind::FtCursorDel => "DEL", + CommandKind::FtCursorRead => "READ", _ => return None, }; @@ -1463,32 +1414,27 @@ impl RedisCommandKind { pub fn use_random_cluster_node(&self) -> bool { matches!( *self, - RedisCommandKind::Publish - | RedisCommandKind::Ping - | RedisCommandKind::Info - | RedisCommandKind::Scan - | RedisCommandKind::FlushAll - | RedisCommandKind::FlushDB + CommandKind::Publish | CommandKind::Ping | CommandKind::Info | CommandKind::FlushAll | CommandKind::FlushDB ) } pub fn is_blocking(&self) -> bool { match *self { - RedisCommandKind::BlPop - | RedisCommandKind::BrPop - | RedisCommandKind::BrPopLPush - | RedisCommandKind::BlMove - | RedisCommandKind::BzPopMin - | RedisCommandKind::BzPopMax - | RedisCommandKind::BlmPop - | RedisCommandKind::BzmPop - | RedisCommandKind::Fcall - | RedisCommandKind::FcallRO - | RedisCommandKind::Wait => true, + CommandKind::BlPop + | CommandKind::BrPop + | CommandKind::BrPopLPush + | CommandKind::BlMove + | CommandKind::BzPopMin + | CommandKind::BzPopMax + | CommandKind::BlmPop + | CommandKind::BzmPop + | CommandKind::Fcall + | 
CommandKind::FcallRO + | CommandKind::Wait => true, // default is false, but can be changed by the BLOCKING args. the RedisCommand::can_pipeline function checks the // args too. - RedisCommandKind::Xread | RedisCommandKind::Xreadgroup => false, - RedisCommandKind::_Custom(ref kind) => kind.blocking, + CommandKind::Xread | CommandKind::Xreadgroup => false, + CommandKind::_Custom(ref kind) => kind.blocking, _ => false, } } @@ -1496,35 +1442,47 @@ impl RedisCommandKind { pub fn force_all_cluster_nodes(&self) -> bool { matches!( *self, - RedisCommandKind::_FlushAllCluster - | RedisCommandKind::_AuthAllCluster - | RedisCommandKind::_ScriptFlushCluster - | RedisCommandKind::_ScriptKillCluster - | RedisCommandKind::_HelloAllCluster(_) - | RedisCommandKind::_ClientTrackingCluster - | RedisCommandKind::_ScriptLoadCluster - | RedisCommandKind::_FunctionFlushCluster - | RedisCommandKind::_FunctionDeleteCluster - | RedisCommandKind::_FunctionRestoreCluster - | RedisCommandKind::_FunctionLoadCluster + CommandKind::_FlushAllCluster + | CommandKind::_AuthAllCluster + | CommandKind::_ScriptFlushCluster + | CommandKind::_ScriptKillCluster + | CommandKind::_HelloAllCluster(_) + | CommandKind::_ClientTrackingCluster + | CommandKind::_ScriptLoadCluster + | CommandKind::_FunctionFlushCluster + | CommandKind::_FunctionDeleteCluster + | CommandKind::_FunctionRestoreCluster + | CommandKind::_FunctionLoadCluster ) } pub fn should_flush(&self) -> bool { matches!( *self, - RedisCommandKind::Quit - | RedisCommandKind::Shutdown - | RedisCommandKind::Ping - | RedisCommandKind::Auth - | RedisCommandKind::_Hello(_) - | RedisCommandKind::Exec - | RedisCommandKind::Discard - | RedisCommandKind::Eval - | RedisCommandKind::EvalSha - | RedisCommandKind::Fcall - | RedisCommandKind::FcallRO - | RedisCommandKind::_Custom(_) + CommandKind::Quit + | CommandKind::Shutdown + | CommandKind::Ping + | CommandKind::Auth + | CommandKind::_Hello(_) + | CommandKind::Exec + | CommandKind::Discard + | 
CommandKind::Eval + | CommandKind::EvalSha + | CommandKind::Fcall + | CommandKind::FcallRO + | CommandKind::_Custom(_) + ) + } + + pub fn is_pubsub(&self) -> bool { + matches!( + *self, + CommandKind::Subscribe + | CommandKind::Unsubscribe + | CommandKind::Psubscribe + | CommandKind::Punsubscribe + | CommandKind::Ssubscribe + | CommandKind::Sunsubscribe ) } @@ -1534,20 +1492,20 @@ impl RedisCommandKind { } else { match self { // make it easier to handle multiple potentially out-of-band responses - RedisCommandKind::Subscribe - | RedisCommandKind::Unsubscribe - | RedisCommandKind::Psubscribe - | RedisCommandKind::Punsubscribe - | RedisCommandKind::Ssubscribe - | RedisCommandKind::Sunsubscribe + CommandKind::Subscribe + | CommandKind::Unsubscribe + | CommandKind::Psubscribe + | CommandKind::Punsubscribe + | CommandKind::Ssubscribe + | CommandKind::Sunsubscribe // https://redis.io/commands/eval#evalsha-in-the-context-of-pipelining - | RedisCommandKind::Eval - | RedisCommandKind::EvalSha - | RedisCommandKind::Auth - | RedisCommandKind::Fcall - | RedisCommandKind::FcallRO + | CommandKind::Eval + | CommandKind::EvalSha + | CommandKind::Auth + | CommandKind::Fcall + | CommandKind::FcallRO // makes it easier to avoid decoding in-flight responses with the wrong codec logic - | RedisCommandKind::_Hello(_) => false, + | CommandKind::_Hello(_) => false, _ => true, } } @@ -1556,14 +1514,14 @@ impl RedisCommandKind { pub fn is_eval(&self) -> bool { matches!( *self, - RedisCommandKind::EvalSha | RedisCommandKind::Eval | RedisCommandKind::Fcall | RedisCommandKind::FcallRO + CommandKind::EvalSha | CommandKind::Eval | CommandKind::Fcall | CommandKind::FcallRO ) } } -pub struct RedisCommand { +pub struct Command { /// The command and optional subcommand name. - pub kind: RedisCommandKind, + pub kind: CommandKind, /// The policy to apply when handling the response. pub response: ResponseKind, /// The policy to use when hashing the arguments for cluster routing. 
@@ -1571,9 +1529,7 @@ pub struct RedisCommand { /// The provided arguments. /// /// Some commands store arguments differently. Callers should use `self.args()` to account for this. - pub arguments: Vec, - /// A oneshot sender used to communicate with the router. - pub router_tx: RefCount>>, + pub arguments: Vec, /// The number of times the command has been written to a socket. pub write_attempts: u32, /// The number of write attempts remaining. @@ -1584,8 +1540,6 @@ pub struct RedisCommand { /// /// Also used for commands like XREAD that block based on an argument. pub can_pipeline: bool, - /// Whether to skip backpressure checks. - pub skip_backpressure: bool, /// Whether to fail fast without retries if the connection ever closes unexpectedly. pub fail_fast: bool, /// The internal ID of a transaction. @@ -1614,7 +1568,7 @@ pub struct RedisCommand { pub caching: Option, } -impl fmt::Debug for RedisCommand { +impl fmt::Debug for Command { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut formatter = f.debug_struct("RedisCommand"); formatter @@ -1624,7 +1578,6 @@ impl fmt::Debug for RedisCommand { .field("can_pipeline", &self.can_pipeline) .field("write_attempts", &self.write_attempts) .field("timeout_dur", &self.timeout_dur) - .field("no_backpressure", &self.skip_backpressure) .field("cluster_node", &self.cluster_node) .field("cluster_hash", &self.hasher) .field("use_replica", &self.use_replica) @@ -1637,32 +1590,30 @@ impl fmt::Debug for RedisCommand { } } -impl fmt::Display for RedisCommand { +impl fmt::Display for Command { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", self.kind.to_str_debug()) } } -impl From for RedisCommand { - fn from(kind: RedisCommandKind) -> Self { +impl From for Command { + fn from(kind: CommandKind) -> Self { (kind, Vec::new()).into() } } -impl From<(RedisCommandKind, Vec)> for RedisCommand { - fn from((kind, arguments): (RedisCommandKind, Vec)) -> Self { - RedisCommand { +impl From<(CommandKind, 
Vec)> for Command { + fn from((kind, arguments): (CommandKind, Vec)) -> Self { + Command { kind, arguments, timed_out: RefCount::new(AtomicBool::new(false)), timeout_dur: None, response: ResponseKind::Respond(None), hasher: ClusterHash::default(), - router_tx: RefCount::new(Mutex::new(None)), attempts_remaining: 0, redirections_remaining: 0, can_pipeline: true, - skip_backpressure: false, transaction_id: None, use_replica: false, cluster_node: None, @@ -1681,20 +1632,18 @@ impl From<(RedisCommandKind, Vec)> for RedisCommand { } } -impl From<(RedisCommandKind, Vec, ResponseSender)> for RedisCommand { - fn from((kind, arguments, tx): (RedisCommandKind, Vec, ResponseSender)) -> Self { - RedisCommand { +impl From<(CommandKind, Vec, ResponseSender)> for Command { + fn from((kind, arguments, tx): (CommandKind, Vec, ResponseSender)) -> Self { + Command { kind, arguments, response: ResponseKind::Respond(Some(tx)), timed_out: RefCount::new(AtomicBool::new(false)), timeout_dur: None, hasher: ClusterHash::default(), - router_tx: RefCount::new(Mutex::new(None)), attempts_remaining: 0, redirections_remaining: 0, can_pipeline: true, - skip_backpressure: false, transaction_id: None, use_replica: false, cluster_node: None, @@ -1713,20 +1662,18 @@ impl From<(RedisCommandKind, Vec, ResponseSender)> for RedisCommand } } -impl From<(RedisCommandKind, Vec, ResponseKind)> for RedisCommand { - fn from((kind, arguments, response): (RedisCommandKind, Vec, ResponseKind)) -> Self { - RedisCommand { +impl From<(CommandKind, Vec, ResponseKind)> for Command { + fn from((kind, arguments, response): (CommandKind, Vec, ResponseKind)) -> Self { + Command { kind, arguments, response, timed_out: RefCount::new(AtomicBool::new(false)), timeout_dur: None, hasher: ClusterHash::default(), - router_tx: RefCount::new(Mutex::new(None)), attempts_remaining: 0, redirections_remaining: 0, can_pipeline: true, - skip_backpressure: false, transaction_id: None, use_replica: false, cluster_node: None, @@ -1745,21 
+1692,19 @@ impl From<(RedisCommandKind, Vec, ResponseKind)> for RedisCommand { } } -impl RedisCommand { +impl Command { /// Create a new command without a response handling policy. - pub fn new(kind: RedisCommandKind, arguments: Vec) -> Self { - RedisCommand { + pub fn new(kind: CommandKind, arguments: Vec) -> Self { + Command { kind, arguments, timed_out: RefCount::new(AtomicBool::new(false)), timeout_dur: None, response: ResponseKind::Respond(None), hasher: ClusterHash::default(), - router_tx: RefCount::new(Mutex::new(None)), attempts_remaining: 1, redirections_remaining: 1, can_pipeline: true, - skip_backpressure: false, transaction_id: None, use_replica: false, cluster_node: None, @@ -1779,18 +1724,16 @@ impl RedisCommand { /// Create a new empty `ASKING` command. pub fn new_asking(hash_slot: u16) -> Self { - RedisCommand { - kind: RedisCommandKind::Asking, + Command { + kind: CommandKind::Asking, hasher: ClusterHash::Custom(hash_slot), arguments: Vec::new(), timed_out: RefCount::new(AtomicBool::new(false)), timeout_dur: None, response: ResponseKind::Respond(None), - router_tx: RefCount::new(Mutex::new(None)), attempts_remaining: 1, redirections_remaining: 1, can_pipeline: true, - skip_backpressure: false, transaction_id: None, use_replica: false, cluster_node: None, @@ -1808,65 +1751,38 @@ impl RedisCommand { } } - /// Whether to pipeline the command. - pub fn should_auto_pipeline(&self, inner: &RefCount, force: bool) -> bool { - let should_pipeline = force - || (inner.is_pipelined() - && self.can_pipeline - && self.kind.can_pipeline() - && !self.blocks_connection() - && !self.is_all_cluster_nodes() - // disable pipelining for transactions to handle ASK errors or support the `abort_on_error` logic - && self.transaction_id.is_none()); - - _trace!( - inner, - "Pipeline check {}: {}", - self.kind.to_str_debug(), - should_pipeline - ); - should_pipeline - } - /// Whether the command should be sent to all cluster nodes concurrently. 
pub fn is_all_cluster_nodes(&self) -> bool { self.kind.force_all_cluster_nodes() || match self.kind { // since we don't know the hash slot we send this to all nodes - RedisCommandKind::Sunsubscribe => self.arguments.is_empty(), + CommandKind::Sunsubscribe => self.arguments.is_empty(), _ => false, } } /// Whether errors writing the command should be returned to the caller. - pub fn should_finish_with_error(&self, inner: &RefCount) -> bool { + pub fn should_finish_with_error(&self, inner: &RefCount) -> bool { self.fail_fast || self.attempts_remaining == 0 || inner.policy.read().is_none() } /// Increment and check the number of write attempts. - pub fn decr_check_attempted(&mut self) -> Result<(), RedisError> { + pub fn decr_check_attempted(&mut self) -> Result<(), Error> { if self.attempts_remaining == 0 { - Err(RedisError::new( - RedisErrorKind::Unknown, - "Too many failed write attempts.", - )) + Err(Error::new(ErrorKind::Unknown, "Too many failed write attempts.")) } else { self.attempts_remaining -= 1; Ok(()) } } - pub fn in_pipelined_transaction(&self) -> bool { - self.transaction_id.is_some() && self.response.is_buffer() - } - - pub fn in_non_pipelined_transaction(&self) -> bool { - self.transaction_id.is_some() && !self.response.is_buffer() + pub fn in_transaction(&self) -> bool { + self.transaction_id.is_some() } - pub fn decr_check_redirections(&mut self) -> Result<(), RedisError> { + pub fn decr_check_redirections(&mut self) -> Result<(), Error> { if self.redirections_remaining == 0 { - Err(RedisError::new(RedisErrorKind::Unknown, "Too many redirections.")) + Err(Error::new(ErrorKind::Routing, "Too many redirections.")) } else { self.redirections_remaining -= 1; Ok(()) @@ -1874,7 +1790,7 @@ impl RedisCommand { } /// Read the arguments associated with the command. 
- pub fn args(&self) -> &Vec { + pub fn args(&self) -> &Vec { match self.response { ResponseKind::ValueScan(ref inner) => &inner.args, ResponseKind::KeyScan(ref inner) => &inner.args, @@ -1888,7 +1804,7 @@ impl RedisCommand { self.transaction_id.is_none() && (self.kind.is_blocking() || match self.kind { - RedisCommandKind::Xread | RedisCommandKind::Xreadgroup => !self.can_pipeline, + CommandKind::Xread | CommandKind::Xreadgroup => !self.can_pipeline, _ => false, }) } @@ -1902,16 +1818,16 @@ impl RedisCommand { pub fn has_no_responses(&self) -> bool { matches!( self.kind, - RedisCommandKind::Subscribe - | RedisCommandKind::Unsubscribe - | RedisCommandKind::Psubscribe - | RedisCommandKind::Punsubscribe - | RedisCommandKind::Sunsubscribe + CommandKind::Subscribe + | CommandKind::Unsubscribe + | CommandKind::Psubscribe + | CommandKind::Punsubscribe + | CommandKind::Sunsubscribe ) } /// Take the arguments from this command. - pub fn take_args(&mut self) -> Vec { + pub fn take_args(&mut self) -> Vec { match self.response { ResponseKind::ValueScan(ref mut inner) => inner.args.drain(..).collect(), ResponseKind::KeyScan(ref mut inner) => inner.args.drain(..).collect(), @@ -1925,39 +1841,11 @@ impl RedisCommand { mem::replace(&mut self.response, ResponseKind::Skip) } - /// Create a channel on which to block the router, returning the receiver. - pub fn create_router_channel(&self) -> OneshotReceiver { - let (tx, rx) = oneshot_channel(); - let mut guard = self.router_tx.lock(); - *guard = Some(tx); - rx - } - - /// Send a message to unblock the router loop, if necessary. - pub fn respond_to_router(&self, inner: &RefCount, cmd: RouterResponse) { - #[allow(unused_mut)] - if let Some(mut tx) = self.router_tx.lock().take() { - if tx.send(cmd).is_err() { - _debug!(inner, "Failed to unblock router loop."); - } - } - } - - /// Take the router sender from the command. 
- pub fn take_router_tx(&self) -> Option { - self.router_tx.lock().take() - } - - /// Whether the command has a channel to the router. - pub fn has_router_channel(&self) -> bool { - self.router_tx.lock().is_some() - } - /// Clone the command, supporting commands with shared response state. /// /// Note: this will **not** clone the router channel. pub fn duplicate(&self, response: ResponseKind) -> Self { - RedisCommand { + Command { timed_out: RefCount::new(AtomicBool::new(false)), kind: self.kind.clone(), arguments: self.arguments.clone(), @@ -1967,8 +1855,6 @@ impl RedisCommand { redirections_remaining: self.redirections_remaining, timeout_dur: self.timeout_dur, can_pipeline: self.can_pipeline, - skip_backpressure: self.skip_backpressure, - router_tx: self.router_tx.clone(), cluster_node: self.cluster_node.clone(), fail_fast: self.fail_fast, response, @@ -1987,7 +1873,7 @@ impl RedisCommand { } /// Inherit connection and perf settings from the client. - pub fn inherit_options(&mut self, inner: &RefCount) { + pub fn inherit_options(&mut self, inner: &RefCount) { if self.attempts_remaining == 0 { self.attempts_remaining = inner.connection.max_command_attempts; } @@ -2035,21 +1921,21 @@ impl RedisCommand { } /// Respond to the caller, taking the response channel in the process. 
- pub fn respond_to_caller(&mut self, result: Result) { + pub fn respond_to_caller(&mut self, result: Result) { match self.response { ResponseKind::KeyScanBuffered(ref inner) => { if let Err(error) = result { - let _ = inner.tx.send(Err(error)); + let _ = inner.tx.try_send(Err(error)); } }, ResponseKind::KeyScan(ref inner) => { if let Err(error) = result { - let _ = inner.tx.send(Err(error)); + let _ = inner.tx.try_send(Err(error)); } }, ResponseKind::ValueScan(ref inner) => { if let Err(error) = result { - let _ = inner.tx.send(Err(error)); + let _ = inner.tx.try_send(Err(error)); } }, _ => @@ -2062,12 +1948,6 @@ impl RedisCommand { } } - /// Finish the command, responding to both the caller and router. - pub fn finish(mut self, inner: &RefCount, result: Result) { - self.respond_to_caller(result); - self.respond_to_router(inner, RouterResponse::Continue); - } - /// Read the first key in the arguments according to the `FirstKey` cluster hash policy. pub fn first_key(&self) -> Option<&[u8]> { ClusterHash::FirstKey.find_key(self.args()) @@ -2092,13 +1972,13 @@ impl RedisCommand { } /// Convert to a single frame with an array of bulk strings (or null). - pub fn to_frame(&self, is_resp3: bool) -> Result { + pub fn to_frame(&self, is_resp3: bool) -> Result { protocol_utils::command_to_frame(self, is_resp3) } /// Convert to a single frame with an array of bulk strings (or null), using a blocking task. #[cfg(all(feature = "blocking-encoding", not(feature = "glommio")))] - pub fn to_frame_blocking(&self, is_resp3: bool, blocking_threshold: usize) -> Result { + pub fn to_frame_blocking(&self, is_resp3: bool, blocking_threshold: usize) -> Result { let cmd_size = protocol_utils::args_size(self.args()); if cmd_size >= blocking_threshold { @@ -2132,80 +2012,49 @@ impl RedisCommand { /// A message sent from the front-end client to the router. pub enum RouterCommand { /// Send a command to the server. 
- Command(RedisCommand), + Command(Command), /// Send a pipelined series of commands to the server. - Pipeline { commands: Vec }, + Pipeline { commands: Vec }, /// Send a transaction to the server. - // Notes: - // * The inner command buffer will not contain the trailing `EXEC` command. - // * Transactions are never pipelined in order to handle ASK responses. - // * IDs must be unique w/r/t other transactions buffered in memory. - // - // There is one special failure mode that must be considered: - // 1. The client sends `MULTI` and we receive an `OK` response. - // 2. The caller sends `GET foo{1}` and we receive a `QUEUED` response. - // 3. The caller sends `GET bar{1}` and we receive an `ASK` response. - // - // According to the cluster spec the client should retry the entire transaction against the node in the `ASK` - // response, but with an `ASKING` command before `MULTI`. However, the future returned to the caller from `GET - // foo{1}` will have already finished at this point. To account for this the client will never pipeline - // transactions against a cluster, and may clone commands before sending them in order to replay them later with - // a different cluster node mapping. + // The inner command buffer will not contain the trailing `EXEC` command. #[cfg(feature = "transactions")] Transaction { id: u64, - commands: Vec, + commands: Vec, abort_on_error: bool, - pipelined: bool, tx: ResponseSender, }, + /// Initiate a reconnection to the provided server, or all servers. + Reconnect { + server: Option, + force: bool, + tx: Option, + #[cfg(feature = "replicas")] + replica: bool, + }, /// Retry a command after a `MOVED` error. - // This will trigger a call to `CLUSTER SLOTS` before the command is retried. Moved { slot: u16, server: Server, - command: RedisCommand, + command: Command, }, /// Retry a command after an `ASK` error. - // This is typically used instead of `RouterResponse::Ask` when a command was pipelined. 
Ask { slot: u16, server: Server, - command: RedisCommand, - }, - /// Initiate a reconnection to the provided server, or all servers. - // The client may not perform a reconnection if a healthy connection exists to `server`, unless `force` is `true`. - Reconnect { - server: Option, - force: bool, - tx: Option, - #[cfg(feature = "replicas")] - replica: bool, + command: Command, }, /// Sync the cached cluster state with the server via `CLUSTER SLOTS`. - SyncCluster { tx: OneshotSender> }, - /// Read the set of active connections managed by the client. - Connections { tx: OneshotSender> }, + SyncCluster { tx: OneshotSender> }, /// Force sync the replica routing table with the server(s). #[cfg(feature = "replicas")] SyncReplicas { - tx: OneshotSender>, + tx: OneshotSender>, reset: bool, }, } impl RouterCommand { - /// Whether the client should skip backpressure on the command buffer when sending this command. - pub fn should_skip_backpressure(&self) -> bool { - matches!( - *self, - RouterCommand::Moved { .. } - | RouterCommand::Ask { .. } - | RouterCommand::SyncCluster { .. } - | RouterCommand::Connections { .. } - ) - } - /// Whether the command should check the health of the backing connections before being used. pub fn should_check_fail_fast(&self) -> bool { match self { @@ -2219,7 +2068,7 @@ impl RouterCommand { /// Finish the command early with the provided error. #[allow(unused_mut)] - pub fn finish_with_error(self, error: RedisError) { + pub fn finish_with_error(self, error: Error) { match self { RouterCommand::Command(mut command) => { command.respond_to_caller(Err(error)); @@ -2245,7 +2094,7 @@ impl RouterCommand { } /// Inherit settings from the configuration structs on `inner`. 
- pub fn inherit_options(&mut self, inner: &RefCount) { + pub fn inherit_options(&mut self, inner: &RefCount) { match self { RouterCommand::Command(ref mut cmd) => { cmd.inherit_options(inner); @@ -2282,20 +2131,6 @@ impl fmt::Debug for RouterCommand { let mut formatter = f.debug_struct("RouterCommand"); match self { - RouterCommand::Ask { server, slot, command } => { - formatter - .field("kind", &"Ask") - .field("server", &server) - .field("slot", &slot) - .field("command", &command.kind.to_str_debug()); - }, - RouterCommand::Moved { server, slot, command } => { - formatter - .field("kind", &"Moved") - .field("server", &server) - .field("slot", &slot) - .field("command", &command.kind.to_str_debug()); - }, #[cfg(not(feature = "replicas"))] RouterCommand::Reconnect { server, force, .. } => { formatter @@ -2323,8 +2158,11 @@ impl fmt::Debug for RouterCommand { RouterCommand::Pipeline { .. } => { formatter.field("kind", &"Pipeline"); }, - RouterCommand::Connections { .. } => { - formatter.field("kind", &"Connections"); + RouterCommand::Ask { .. } => { + formatter.field("kind", &"Ask"); + }, + RouterCommand::Moved { .. 
} => { + formatter.field("kind", &"Moved"); }, RouterCommand::Command(command) => { formatter @@ -2342,8 +2180,8 @@ impl fmt::Debug for RouterCommand { } } -impl From for RouterCommand { - fn from(cmd: RedisCommand) -> Self { +impl From for RouterCommand { + fn from(cmd: Command) -> Self { RouterCommand::Command(cmd) } } diff --git a/src/protocol/connection.rs b/src/protocol/connection.rs index 3ab18296..fe1f495b 100644 --- a/src/protocol/connection.rs +++ b/src/protocol/connection.rs @@ -1,34 +1,35 @@ use crate::{ - error::{RedisError, RedisErrorKind}, - modules::inner::RedisClientInner, + error::{Error, ErrorKind}, + modules::inner::ClientInner, protocol::{ - codec::RedisCodec, - command::{RedisCommand, RedisCommandKind, RouterResponse}, + codec::Codec, + command::{Command, CommandKind}, types::{ProtocolFrame, Server}, utils as protocol_utils, }, - runtime::{AtomicBool, AtomicUsize, JoinHandle, RefCount}, + router::{centralized, clustered, responses}, + runtime::{AtomicUsize, RefCount}, types::InfoKind, utils as client_utils, utils, }; use bytes_utils::Str; -use crossbeam_queue::SegQueue; use futures::{ sink::SinkExt, - stream::{SplitSink, SplitStream, StreamExt}, + stream::{Peekable, StreamExt}, Sink, Stream, }; use redis_protocol::resp3::types::{BytesFrame as Resp3Frame, Resp3Frame as _Resp3Frame, RespVersion}; use semver::Version; use std::{ + collections::VecDeque, fmt, net::SocketAddr, pin::Pin, str, task::{Context, Poll}, - time::Duration, + time::{Duration, Instant}, }; use tokio_util::codec::Framed; @@ -54,9 +55,7 @@ use crate::prelude::ServerConfig; ))] use crate::protocol::tls::TlsConnector; #[cfg(feature = "replicas")] -use crate::runtime::oneshot_channel; -#[cfg(feature = "replicas")] -use crate::{protocol::responders::ResponseKind, types::RedisValue}; +use crate::types::Value; #[cfg(feature = "unix-sockets")] use std::path::Path; #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] @@ -72,68 +71,15 @@ use 
tokio_rustls::client::TlsStream as RustlsStream; pub const OK: &str = "OK"; /// The timeout duration used when dropping the split sink and waiting on the split stream to close. pub const CONNECTION_CLOSE_TIMEOUT_MS: u64 = 5_000; - -pub type CommandBuffer = Vec; - -/// A shared buffer across tasks. -#[derive(Clone, Debug)] -pub struct SharedBuffer { - inner: RefCount>, - blocked: RefCount, -} - -impl SharedBuffer { - pub fn new() -> Self { - SharedBuffer { - inner: RefCount::new(SegQueue::new()), - blocked: RefCount::new(AtomicBool::new(false)), - } - } - - pub fn push(&self, cmd: RedisCommand) { - self.inner.push(cmd); - } - - pub fn pop(&self) -> Option { - self.inner.pop() - } - - pub fn len(&self) -> usize { - self.inner.len() - } - - pub fn set_blocked(&self) { - utils::set_bool_atomic(&self.blocked, true); - } - - pub fn set_unblocked(&self) { - utils::set_bool_atomic(&self.blocked, false); - } - - pub fn is_blocked(&self) -> bool { - utils::read_bool_atomic(&self.blocked) - } - - pub fn drain(&self) -> Vec { - utils::set_bool_atomic(&self.blocked, false); - let mut out = Vec::with_capacity(self.inner.len()); - while let Some(cmd) = self.inner.pop() { - out.push(cmd); - } - out - } -} - -pub type SplitRedisSink = SplitSink, ProtocolFrame>; -pub type SplitRedisStream = SplitStream>; +pub const INITIAL_BUFFER_SIZE: usize = 64; /// Connect to each socket addr and return the first successful connection. 
async fn tcp_connect_any( - inner: &RefCount, + inner: &RefCount, server: &Server, addrs: &Vec, -) -> Result<(TcpStream, SocketAddr), RedisError> { - let mut last_error: Option = None; +) -> Result<(TcpStream, SocketAddr), Error> { + let mut last_error: Option = None; for addr in addrs.iter() { _debug!( @@ -176,48 +122,21 @@ async fn tcp_connect_any( } _trace!(inner, "Failed to connect to any of {:?}.", addrs); - Err(last_error.unwrap_or(RedisError::new(RedisErrorKind::IO, "Failed to connect."))) + Err(last_error.unwrap_or(Error::new(ErrorKind::IO, "Failed to connect."))) } pub enum ConnectionKind { - Tcp(Framed), + Tcp(Peekable>), #[cfg(feature = "unix-sockets")] - Unix(Framed), + Unix(Peekable>), #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] - Rustls(Framed, RedisCodec>), + Rustls(Peekable, Codec>>), #[cfg(feature = "enable-native-tls")] - NativeTls(Framed, RedisCodec>), -} - -impl ConnectionKind { - /// Split the connection. - pub fn split(self) -> (SplitSinkKind, SplitStreamKind) { - match self { - ConnectionKind::Tcp(conn) => { - let (sink, stream) = conn.split(); - (SplitSinkKind::Tcp(sink), SplitStreamKind::Tcp(stream)) - }, - #[cfg(feature = "unix-sockets")] - ConnectionKind::Unix(conn) => { - let (sink, stream) = conn.split(); - (SplitSinkKind::Unix(sink), SplitStreamKind::Unix(stream)) - }, - #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] - ConnectionKind::Rustls(conn) => { - let (sink, stream) = conn.split(); - (SplitSinkKind::Rustls(sink), SplitStreamKind::Rustls(stream)) - }, - #[cfg(feature = "enable-native-tls")] - ConnectionKind::NativeTls(conn) => { - let (sink, stream) = conn.split(); - (SplitSinkKind::NativeTls(sink), SplitStreamKind::NativeTls(stream)) - }, - } - } + NativeTls(Peekable, Codec>>), } impl Stream for ConnectionKind { - type Item = Result; + type Item = Result; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match self.get_mut() { @@ -245,7 +164,7 @@ impl Stream 
for ConnectionKind { } impl Sink for ConnectionKind { - type Error = RedisError; + type Error = Error; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match self.get_mut() { @@ -296,112 +215,10 @@ impl Sink for ConnectionKind { } } -pub enum SplitStreamKind { - Tcp(SplitRedisStream), - #[cfg(feature = "unix-sockets")] - Unix(SplitRedisStream), - #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] - Rustls(SplitRedisStream>), - #[cfg(feature = "enable-native-tls")] - NativeTls(SplitRedisStream>), -} - -impl Stream for SplitStreamKind { - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.get_mut() { - SplitStreamKind::Tcp(ref mut conn) => Pin::new(conn).poll_next(cx), - #[cfg(feature = "unix-sockets")] - SplitStreamKind::Unix(ref mut conn) => Pin::new(conn).poll_next(cx), - #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] - SplitStreamKind::Rustls(ref mut conn) => Pin::new(conn).poll_next(cx), - #[cfg(feature = "enable-native-tls")] - SplitStreamKind::NativeTls(ref mut conn) => Pin::new(conn).poll_next(cx), - } - } - - fn size_hint(&self) -> (usize, Option) { - match self { - SplitStreamKind::Tcp(ref conn) => conn.size_hint(), - #[cfg(feature = "unix-sockets")] - SplitStreamKind::Unix(ref conn) => conn.size_hint(), - #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] - SplitStreamKind::Rustls(ref conn) => conn.size_hint(), - #[cfg(feature = "enable-native-tls")] - SplitStreamKind::NativeTls(ref conn) => conn.size_hint(), - } - } -} - -pub enum SplitSinkKind { - Tcp(SplitRedisSink), - #[cfg(feature = "unix-sockets")] - Unix(SplitRedisSink), - #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] - Rustls(SplitRedisSink>), - #[cfg(feature = "enable-native-tls")] - NativeTls(SplitRedisSink>), -} - -impl Sink for SplitSinkKind { - type Error = RedisError; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> 
Poll> { - match self.get_mut() { - SplitSinkKind::Tcp(ref mut conn) => Pin::new(conn).poll_ready(cx), - #[cfg(feature = "unix-sockets")] - SplitSinkKind::Unix(ref mut conn) => Pin::new(conn).poll_ready(cx), - #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] - SplitSinkKind::Rustls(ref mut conn) => Pin::new(conn).poll_ready(cx), - #[cfg(feature = "enable-native-tls")] - SplitSinkKind::NativeTls(ref mut conn) => Pin::new(conn).poll_ready(cx), - } - } - - fn start_send(self: Pin<&mut Self>, item: ProtocolFrame) -> Result<(), Self::Error> { - match self.get_mut() { - SplitSinkKind::Tcp(ref mut conn) => Pin::new(conn).start_send(item), - #[cfg(feature = "unix-sockets")] - SplitSinkKind::Unix(ref mut conn) => Pin::new(conn).start_send(item), - #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] - SplitSinkKind::Rustls(ref mut conn) => Pin::new(conn).start_send(item), - #[cfg(feature = "enable-native-tls")] - SplitSinkKind::NativeTls(ref mut conn) => Pin::new(conn).start_send(item), - } - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.get_mut() { - SplitSinkKind::Tcp(ref mut conn) => Pin::new(conn).poll_flush(cx).map_err(|e| e), - #[cfg(feature = "unix-sockets")] - SplitSinkKind::Unix(ref mut conn) => Pin::new(conn).poll_flush(cx).map_err(|e| e), - #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] - SplitSinkKind::Rustls(ref mut conn) => Pin::new(conn).poll_flush(cx).map_err(|e| e), - #[cfg(feature = "enable-native-tls")] - SplitSinkKind::NativeTls(ref mut conn) => Pin::new(conn).poll_flush(cx).map_err(|e| e), - } - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.get_mut() { - SplitSinkKind::Tcp(ref mut conn) => Pin::new(conn).poll_close(cx).map_err(|e| e), - #[cfg(feature = "unix-sockets")] - SplitSinkKind::Unix(ref mut conn) => Pin::new(conn).poll_close(cx).map_err(|e| e), - #[cfg(any(feature = "enable-rustls", feature = 
"enable-rustls-ring"))] - SplitSinkKind::Rustls(ref mut conn) => Pin::new(conn).poll_close(cx).map_err(|e| e), - #[cfg(feature = "enable-native-tls")] - SplitSinkKind::NativeTls(ref mut conn) => Pin::new(conn).poll_close(cx).map_err(|e| e), - } - } -} - /// Atomic counters stored with connection state. -// TODO with glommio these don't need to be atomics #[derive(Clone, Debug)] pub struct Counters { pub cmd_buffer_len: RefCount, - pub in_flight: RefCount, pub feed_count: RefCount, } @@ -409,13 +226,12 @@ impl Counters { pub fn new(cmd_buffer_len: &RefCount) -> Self { Counters { cmd_buffer_len: cmd_buffer_len.clone(), - in_flight: RefCount::new(AtomicUsize::new(0)), feed_count: RefCount::new(AtomicUsize::new(0)), } } /// Flush the sink if the max feed count is reached or no commands are queued following the current command. - pub fn should_send(&self, inner: &RefCount) -> bool { + pub fn should_send(&self, inner: &RefCount) -> bool { client_utils::read_atomic(&self.feed_count) as u64 > inner.max_feed_count() || client_utils::read_atomic(&self.cmd_buffer_len) == 0 } @@ -424,24 +240,13 @@ impl Counters { client_utils::incr_atomic(&self.feed_count) } - pub fn incr_in_flight(&self) -> usize { - client_utils::incr_atomic(&self.in_flight) - } - - pub fn decr_in_flight(&self) -> usize { - client_utils::decr_atomic(&self.in_flight) - } - pub fn reset_feed_count(&self) { client_utils::set_atomic(&self.feed_count, 0); } - - pub fn reset_in_flight(&self) { - client_utils::set_atomic(&self.in_flight, 0); - } } -pub struct RedisTransport { +/// A connection to Redis that is not auto-pipelined and cannot be shared across client tasks. +pub struct ExclusiveConnection { /// An identifier for the connection, usually `|:`. pub server: Server, /// The parsed `SocketAddr` for the connection. 
@@ -458,21 +263,21 @@ pub struct RedisTransport { pub counters: Counters, } -impl RedisTransport { - pub async fn new_tcp(inner: &RefCount, server: &Server) -> Result { +impl ExclusiveConnection { + pub async fn new_tcp(inner: &RefCount, server: &Server) -> Result { let counters = Counters::new(&inner.counters.cmd_buffer_len); let (id, version) = (None, None); let default_host = server.host.clone(); - let codec = RedisCodec::new(inner, server); + let codec = Codec::new(inner, server); let addrs = inner .get_resolver() .await .resolve(server.host.clone(), server.port) .await?; let (socket, addr) = tcp_connect_any(inner, server, &addrs).await?; - let transport = ConnectionKind::Tcp(Framed::new(socket, codec)); + let transport = ConnectionKind::Tcp(Framed::new(socket, codec).peekable()); - Ok(RedisTransport { + Ok(ExclusiveConnection { server: server.clone(), addr: Some(addr), default_host, @@ -484,17 +289,17 @@ impl RedisTransport { } #[cfg(feature = "unix-sockets")] - pub async fn new_unix(inner: &RefCount, path: &Path) -> Result { + pub async fn new_unix(inner: &RefCount, path: &Path) -> Result { _debug!(inner, "Connecting via unix socket to {}", utils::path_to_string(path)); let server = Server::new(utils::path_to_string(path), 0); let counters = Counters::new(&inner.counters.cmd_buffer_len); let (id, version) = (None, None); let default_host = server.host.clone(); - let codec = RedisCodec::new(inner, &server); + let codec = Codec::new(inner, &server); let socket = UnixStream::connect(path).await?; - let transport = ConnectionKind::Unix(Framed::new(socket, codec)); + let transport = ConnectionKind::Unix(Framed::new(socket, codec).peekable()); - Ok(RedisTransport { + Ok(ExclusiveConnection { addr: None, server, default_host, @@ -507,16 +312,13 @@ impl RedisTransport { #[cfg(feature = "enable-native-tls")] #[allow(unreachable_patterns)] - pub async fn new_native_tls( - inner: &RefCount, - server: &Server, - ) -> Result { + pub async fn new_native_tls(inner: 
&RefCount, server: &Server) -> Result { let connector = match inner.config.tls { Some(ref config) => match config.connector { TlsConnector::Native(ref connector) => connector.clone(), - _ => return Err(RedisError::new(RedisErrorKind::Tls, "Invalid TLS configuration.")), + _ => return Err(Error::new(ErrorKind::Tls, "Invalid TLS configuration.")), }, - None => return RedisTransport::new_tcp(inner, server).await, + None => return ExclusiveConnection::new_tcp(inner, server).await, }; let counters = Counters::new(&inner.counters.cmd_buffer_len); @@ -524,7 +326,7 @@ impl RedisTransport { let tls_server_name = server.tls_server_name.as_ref().cloned().unwrap_or(server.host.clone()); let default_host = server.host.clone(); - let codec = RedisCodec::new(inner, server); + let codec = Codec::new(inner, server); let addrs = inner .get_resolver() .await @@ -534,9 +336,9 @@ impl RedisTransport { _debug!(inner, "native-tls handshake with server name/host: {}", tls_server_name); let socket = connector.clone().connect(&tls_server_name, socket).await?; - let transport = ConnectionKind::NativeTls(Framed::new(socket, codec)); + let transport = ConnectionKind::NativeTls(Framed::new(socket, codec).peekable()); - Ok(RedisTransport { + Ok(ExclusiveConnection { server: server.clone(), addr: Some(addr), default_host, @@ -548,24 +350,21 @@ impl RedisTransport { } #[cfg(not(feature = "enable-native-tls"))] - pub async fn new_native_tls( - inner: &RefCount, - server: &Server, - ) -> Result { - RedisTransport::new_tcp(inner, server).await + pub async fn new_native_tls(inner: &RefCount, server: &Server) -> Result { + ExclusiveConnection::new_tcp(inner, server).await } #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] #[allow(unreachable_patterns)] - pub async fn new_rustls(inner: &RefCount, server: &Server) -> Result { + pub async fn new_rustls(inner: &RefCount, server: &Server) -> Result { use rustls::pki_types::ServerName; let connector = match inner.config.tls { Some(ref 
config) => match config.connector { TlsConnector::Rustls(ref connector) => connector.clone(), - _ => return Err(RedisError::new(RedisErrorKind::Tls, "Invalid TLS configuration.")), + _ => return Err(Error::new(ErrorKind::Tls, "Invalid TLS configuration.")), }, - None => return RedisTransport::new_tcp(inner, server).await, + None => return ExclusiveConnection::new_tcp(inner, server).await, }; let counters = Counters::new(&inner.counters.cmd_buffer_len); @@ -573,7 +372,7 @@ impl RedisTransport { let tls_server_name = server.tls_server_name.as_ref().cloned().unwrap_or(server.host.clone()); let default_host = server.host.clone(); - let codec = RedisCodec::new(inner, server); + let codec = Codec::new(inner, server); let addrs = inner .get_resolver() .await @@ -584,9 +383,9 @@ impl RedisTransport { _debug!(inner, "rustls handshake with server name/host: {:?}", tls_server_name); let socket = connector.clone().connect(server_name.to_owned(), socket).await?; - let transport = ConnectionKind::Rustls(Framed::new(socket, codec)); + let transport = ConnectionKind::Rustls(Framed::new(socket, codec).peekable()); - Ok(RedisTransport { + Ok(ExclusiveConnection { server: server.clone(), addr: Some(addr), counters, @@ -598,12 +397,12 @@ impl RedisTransport { } #[cfg(not(any(feature = "enable-rustls", feature = "enable-rustls-ring")))] - pub async fn new_rustls(inner: &RefCount, server: &Server) -> Result { - RedisTransport::new_tcp(inner, server).await + pub async fn new_rustls(inner: &RefCount, server: &Server) -> Result { + ExclusiveConnection::new_tcp(inner, server).await } /// Send a command to the server. - pub async fn request_response(&mut self, cmd: RedisCommand, is_resp3: bool) -> Result { + pub async fn request_response(&mut self, cmd: Command, is_resp3: bool) -> Result { let frame = cmd.to_frame(is_resp3)?; self.transport.send(frame).await?; @@ -614,10 +413,10 @@ impl RedisTransport { } /// Set the client name with `CLIENT SETNAME`. 
- pub async fn set_client_name(&mut self, inner: &RefCount) -> Result<(), RedisError> { + pub async fn set_client_name(&mut self, inner: &RefCount) -> Result<(), Error> { _debug!(inner, "Setting client name."); let name = &inner.id; - let command = RedisCommand::new(RedisCommandKind::ClientSetname, vec![name.clone().into()]); + let command = Command::new(CommandKind::ClientSetname, vec![name.clone().into()]); let response = self.request_response(command, inner.is_resp3()).await?; if protocol_utils::is_ok(&response) { @@ -625,13 +424,13 @@ impl RedisTransport { Ok(()) } else { error!("{} Failed to set client name with error {:?}", name, response); - Err(RedisError::new(RedisErrorKind::Protocol, "Failed to set client name.")) + Err(Error::new(ErrorKind::Protocol, "Failed to set client name.")) } } /// Read and cache the server version. - pub async fn cache_server_version(&mut self, inner: &RefCount) -> Result<(), RedisError> { - let command = RedisCommand::new(RedisCommandKind::Info, vec![InfoKind::Server.to_str().into()]); + pub async fn cache_server_version(&mut self, inner: &RefCount) -> Result<(), Error> { + let command = Command::new(CommandKind::Info, vec![InfoKind::Server.to_str().into()]); let result = self.request_response(command, inner.is_resp3()).await?; let result = match result { Resp3Frame::SimpleString { data, .. 
} => String::from_utf8(data.to_vec())?, @@ -677,14 +476,14 @@ impl RedisTransport { username: Option, password: Option, is_resp3: bool, - ) -> Result<(), RedisError> { + ) -> Result<(), Error> { if let Some(password) = password { let args = if let Some(username) = username { vec![username.into(), password.into()] } else { vec![password.into()] }; - let command = RedisCommand::new(RedisCommandKind::Auth, args); + let command = Command::new(CommandKind::Auth, args); debug!("{}: Authenticating Redis client...", name); let frame = self.request_response(command, is_resp3).await?; @@ -701,10 +500,7 @@ impl RedisTransport { } /// Authenticate via HELLO in RESP3 mode or AUTH in RESP2 mode, then set the client name. - pub async fn switch_protocols_and_authenticate( - &mut self, - inner: &RefCount, - ) -> Result<(), RedisError> { + pub async fn switch_protocols_and_authenticate(&mut self, inner: &RefCount) -> Result<(), Error> { // reset the protocol version to the one specified by the config when we create new connections inner.reset_protocol_version(); let (username, password) = inner.read_credentials(&self.server).await?; @@ -721,7 +517,7 @@ impl RedisTransport { vec![] }; - let cmd = RedisCommand::new(RedisCommandKind::_Hello(RespVersion::RESP3), args); + let cmd = Command::new(CommandKind::_Hello(RespVersion::RESP3), args); let response = self.request_response(cmd, true).await?; let response = protocol_utils::frame_to_results(response)?; inner.switch_protocol_versions(RespVersion::RESP3); @@ -734,8 +530,8 @@ impl RedisTransport { } /// Read and cache the connection ID. 
- pub async fn cache_connection_id(&mut self, inner: &RefCount) -> Result<(), RedisError> { - let command = (RedisCommandKind::ClientID, vec![]).into(); + pub async fn cache_connection_id(&mut self, inner: &RefCount) -> Result<(), Error> { + let command = (CommandKind::ClientID, vec![]).into(); let result = self.request_response(command, inner.is_resp3()).await; _debug!(inner, "Read client ID: {:?}", result); self.id = match result { @@ -747,8 +543,8 @@ impl RedisTransport { } /// Send `PING` to the server. - pub async fn ping(&mut self, inner: &RefCount) -> Result<(), RedisError> { - let command = RedisCommandKind::Ping.into(); + pub async fn ping(&mut self, inner: &RefCount) -> Result<(), Error> { + let command = CommandKind::Ping.into(); let response = self.request_response(command, inner.is_resp3()).await?; if let Some(e) = protocol_utils::frame_to_error(&response) { @@ -759,7 +555,7 @@ impl RedisTransport { } /// Send `QUIT` and close the connection. - pub async fn disconnect(&mut self, inner: &RefCount) -> Result<(), RedisError> { + pub async fn disconnect(&mut self, inner: &RefCount) -> Result<(), Error> { if let Err(e) = self.transport.close().await { _warn!(inner, "Error closing connection to {}: {:?}", self.server, e); } @@ -767,7 +563,7 @@ impl RedisTransport { } /// Select the database provided in the `RedisConfig`. 
- pub async fn select_database(&mut self, inner: &RefCount) -> Result<(), RedisError> { + pub async fn select_database(&mut self, inner: &RefCount) -> Result<(), Error> { if inner.config.server.is_clustered() { return Ok(()); } @@ -778,7 +574,7 @@ impl RedisTransport { }; _trace!(inner, "Selecting database {} after connecting.", db); - let command = RedisCommand::new(RedisCommandKind::Select, vec![(db as i64).into()]); + let command = Command::new(CommandKind::Select, vec![(db as i64).into()]); let response = self.request_response(command, inner.is_resp3()).await?; if let Some(error) = protocol_utils::frame_to_error(&response) { @@ -791,13 +587,13 @@ impl RedisTransport { /// Check the `cluster_state` via `CLUSTER INFO`. /// /// Returns an error if the state is not `ok`. - pub async fn check_cluster_state(&mut self, inner: &RefCount) -> Result<(), RedisError> { + pub async fn check_cluster_state(&mut self, inner: &RefCount) -> Result<(), Error> { if !inner.config.server.is_clustered() { return Ok(()); } _trace!(inner, "Checking cluster info for {}", self.server); - let command = RedisCommand::new(RedisCommandKind::ClusterInfo, vec![]); + let command = Command::new(CommandKind::ClusterInfo, vec![]); let response = self.request_response(command, inner.is_resp3()).await?; let response: String = protocol_utils::frame_to_results(response)?.convert()?; @@ -808,19 +604,12 @@ impl RedisTransport { } } - Err(RedisError::new( - RedisErrorKind::Protocol, - "Invalid or missing cluster state.", - )) + Err(Error::new(ErrorKind::Protocol, "Invalid or missing cluster state.")) } /// Authenticate, set the protocol version, set the client name, select the provided database, cache the /// connection ID and server version, and check the cluster state (if applicable). 
- pub async fn setup( - &mut self, - inner: &RefCount, - timeout: Option, - ) -> Result<(), RedisError> { + pub async fn setup(&mut self, inner: &RefCount, timeout: Option) -> Result<(), Error> { let timeout = timeout.unwrap_or(inner.internal_command_timeout()); let has_credentials = inner.config.password.is_some() || inner.config.version == RespVersion::RESP3; #[cfg(feature = "credential-provider")] @@ -843,7 +632,7 @@ impl RedisTransport { self.check_cluster_state(inner).await?; } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }, timeout, ) @@ -852,11 +641,7 @@ impl RedisTransport { /// Send `READONLY` to the server. #[cfg(feature = "replicas")] - pub async fn readonly( - &mut self, - inner: &RefCount, - timeout: Option, - ) -> Result<(), RedisError> { + pub async fn readonly(&mut self, inner: &RefCount, timeout: Option) -> Result<(), Error> { if !inner.config.server.is_clustered() { return Ok(()); } @@ -865,11 +650,11 @@ impl RedisTransport { utils::timeout( async { _debug!(inner, "Sending READONLY to {}", self.server); - let command = RedisCommand::new(RedisCommandKind::Readonly, vec![]); + let command = Command::new(CommandKind::Readonly, vec![]); let response = self.request_response(command, inner.is_resp3()).await?; let _ = protocol_utils::frame_to_results(response)?; - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }, timeout, ) @@ -878,13 +663,9 @@ impl RedisTransport { /// Send the `ROLE` command to the server. #[cfg(feature = "replicas")] - pub async fn role( - &mut self, - inner: &RefCount, - timeout: Option, - ) -> Result { + pub async fn role(&mut self, inner: &RefCount, timeout: Option) -> Result { let timeout = timeout.unwrap_or(inner.internal_command_timeout()); - let command = RedisCommand::new(RedisCommandKind::Role, vec![]); + let command = Command::new(CommandKind::Role, vec![]); utils::timeout( async { @@ -900,7 +681,7 @@ impl RedisTransport { /// Discover connected replicas via the ROLE command. 
#[cfg(feature = "replicas")] - pub async fn discover_replicas(&mut self, inner: &RefCount) -> Result, RedisError> { + pub async fn discover_replicas(&mut self, inner: &RefCount) -> Result, Error> { self .role(inner, None) .await @@ -909,89 +690,56 @@ impl RedisTransport { /// Discover connected replicas via the ROLE command. #[cfg(not(feature = "replicas"))] - pub async fn discover_replicas(&mut self, _: &RefCount) -> Result, RedisError> { + pub async fn discover_replicas(&mut self, _: &RefCount) -> Result, Error> { Ok(Vec::new()) } - /// Split the transport into reader/writer halves. - pub fn split(self) -> (RedisWriter, RedisReader) { - let buffer = SharedBuffer::new(); + /// Convert the connection into one that can be shared and pipelined across tasks. + pub fn into_pipelined(self, _replica: bool) -> Connection { + let buffer = VecDeque::with_capacity(INITIAL_BUFFER_SIZE); let (server, addr, default_host) = (self.server, self.addr, self.default_host); - let (sink, stream) = self.transport.split(); let (id, version, counters) = (self.id, self.version, self.counters); - let writer = RedisWriter { - sink, - id, - version, + Connection { + server, default_host, - counters: counters.clone(), - server: server.clone(), addr, - buffer: buffer.clone(), - reader: None, - }; - let reader = RedisReader { - stream: Some(stream), - task: None, - server, buffer, + version, counters, - }; - (writer, reader) - } -} - -pub struct RedisReader { - pub stream: Option, - pub server: Server, - pub buffer: SharedBuffer, - pub counters: Counters, - pub task: Option>>, -} - -impl RedisReader { - pub async fn wait(&mut self) -> Result<(), RedisError> { - if let Some(ref mut task) = self.task { - task.await? 
- } else { - Ok(()) - } - } - - pub fn is_connected(&self) -> bool { - self.task.is_some() || self.stream.is_some() - } - - pub fn is_running(&self) -> bool { - self.task.is_some() - } - - pub fn stop(&mut self, abort: bool) { - if abort && self.task.is_some() { - self.task.take().unwrap().abort(); - } else { - self.task = None; + id, + last_write: None, + transport: self.transport, + blocked: false, + #[cfg(feature = "replicas")] + replica: _replica, } - self.stream = None; } } -pub struct RedisWriter { - pub sink: SplitSinkKind, +/// A connection to Redis that can be shared and pipelined across tasks. +/// +/// Once a connection becomes usable by clients we can no longer use the request-response logic on `RedisTransport` +/// since caller tasks may have in-flight frames already on the wire. This struct contains extra state used to +/// pipeline commands across tasks. +pub struct Connection { pub server: Server, + pub transport: ConnectionKind, pub default_host: Str, pub addr: Option, - pub buffer: SharedBuffer, + pub buffer: VecDeque, pub version: Option, pub id: Option, pub counters: Counters, - pub reader: Option, + pub last_write: Option, + pub blocked: bool, + #[cfg(feature = "replicas")] + pub replica: bool, } -impl fmt::Debug for RedisWriter { +impl fmt::Debug for Connection { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Connection") + f.debug_struct("RedisConnection") .field("server", &self.server) .field("id", &self.id) .field("default_host", &self.default_host) @@ -1000,126 +748,222 @@ impl fmt::Debug for RedisWriter { } } -impl RedisWriter { - /// Flush the sink and reset the feed counter. 
- pub async fn flush(&mut self) -> Result<(), RedisError> { - trace!("Flushing socket to {}", self.server); - self.sink.flush().await?; - trace!("Flushed socket to {}", self.server); - self.counters.reset_feed_count(); - Ok(()) - } - - #[cfg(feature = "replicas")] - pub async fn discover_replicas(&mut self, inner: &RefCount) -> Result, RedisError> { - let command = RedisCommand::new(RedisCommandKind::Role, vec![]); - let role = request_response(inner, self, command, None) - .await - .and_then(protocol_utils::frame_to_results)?; - - protocol_utils::parse_master_role_replicas(role) - } +impl Connection { + /// Check if the reader half is healthy, returning any errors. + pub async fn peek_reader_errors(&mut self) -> Option { + // TODO does this need to return an error if poll_peek returns Poll::Ready(None)? + let result = std::future::poll_fn(|cx| match self.transport { + ConnectionKind::Tcp(ref mut t) => match Pin::new(t).poll_peek(cx) { + Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.clone()))), + _ => Poll::Ready(None::>), + }, + #[cfg(feature = "unix-sockets")] + ConnectionKind::Unix(ref mut t) => match Pin::new(t).poll_peek(cx) { + Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.clone()))), + _ => Poll::Ready(None), + }, + #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] + ConnectionKind::Rustls(ref mut t) => match Pin::new(t).poll_peek(cx) { + Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.clone()))), + _ => Poll::Ready(None), + }, + #[cfg(feature = "enable-native-tls")] + ConnectionKind::NativeTls(ref mut t) => match Pin::new(t).poll_peek(cx) { + Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.clone()))), + _ => Poll::Ready(None), + }, + }); - /// Check if the reader task is still running or awaiting frames. 
- pub fn is_working(&self) -> bool { - self - .reader - .as_ref() - .and_then(|reader| reader.task.as_ref()) - .map(|task| !task.is_finished()) - .unwrap_or(false) + if let Some(Err(e)) = result.await { + Some(e) + } else { + None + } } - /// Send a command to the server without waiting on the response. - pub async fn write_frame( + /// Write a frame to the socket. + /// + /// The caller is responsible for pushing frames into the in-flight buffer. + #[inline(always)] + pub async fn write>( &mut self, - frame: ProtocolFrame, - should_flush: bool, - no_incr: bool, - ) -> Result<(), RedisError> { - if should_flush { - trace!("Writing and flushing {}", self.server); - if let Err(e) = self.sink.send(frame).await { - // the more useful error appears on the reader half but we'll log this just in case - debug!("{}: Error sending frame to socket: {:?}", self.server, e); - return Err(e); - } + frame: F, + flush: bool, + check_unresponsive: bool, + ) -> Result<(), Error> { + if check_unresponsive { + self.last_write = Some(Instant::now()); + } + + if flush { self.counters.reset_feed_count(); + self.transport.send(frame.into()).await } else { - trace!("Writing without flushing {}", self.server); - if let Err(e) = self.sink.feed(frame).await { - // the more useful error appears on the reader half but we'll log this just in case - debug!("{}: Error feeding frame to socket: {:?}", self.server, e); - return Err(e); - } self.counters.incr_feed_count(); - }; - if !no_incr { - self.counters.incr_in_flight(); + self.transport.feed(frame.into()).await } - - Ok(()) } - /// Put a command at the back of the command queue. - pub fn push_command(&self, inner: &RefCount, mut cmd: RedisCommand) { + /// Put a command at the back of the in-flight command buffer. 
+ pub fn push_command(&mut self, mut cmd: Command) { if cmd.has_no_responses() { - _trace!( - inner, - "Skip adding `{}` command to response buffer (no expected responses).", - cmd.kind.to_str_debug() - ); - - cmd.respond_to_router(inner, RouterResponse::Continue); cmd.respond_to_caller(Ok(Resp3Frame::Null)); - return; + } else { + if cmd.blocks_connection() { + self.blocked = true; + } + self.buffer.push_back(cmd); } + } - if cmd.blocks_connection() { - self.buffer.set_blocked(); + /// Read the next frame from the reader half. + /// + /// This function is not cancel-safe. + #[inline(always)] + pub async fn read(&mut self) -> Result, Error> { + match self.transport.next().await { + Some(f) => f.map(|f| Some(f.into_resp3())), + None => Ok(None), } - self.buffer.push(cmd); } - /// Force close the connection. - /// - /// Returns the in-flight commands that had not received a response. - pub fn force_close(self, abort_reader: bool) -> CommandBuffer { - if abort_reader && self.reader.is_some() { - self.reader.unwrap().stop(true); + /// Read frames until detecting a non-pubsub frame. + #[inline(always)] + pub async fn read_skip_pubsub(&mut self, inner: &RefCount) -> Result, Error> { + loop { + let frame = match self.read().await? { + Some(f) => f, + None => return Ok(None), + }; + + if let Some(err) = responses::check_fatal_errors(inner, &self.server, &frame) { + return Err(err); + } else if let Some(frame) = responses::check_pubsub_message(inner, &self.server, frame) { + return Ok(Some(frame)); + } else { + continue; + } + } + } + + /// Read frames until the in-flight buffer is empty. + pub async fn drain(&mut self, inner: &RefCount) -> Result<(), Error> { + let is_clustered = inner.config.server.is_clustered(); + while !self.buffer.is_empty() { + let frame = match self.read().await? 
{ + Some(f) => f, + None => return Ok(()), + }; + + if let Some(err) = responses::check_fatal_errors(inner, &self.server, &frame) { + return Err(err); + } else if let Some(frame) = responses::check_pubsub_message(inner, &self.server, frame) { + if is_clustered { + clustered::process_response_frame(inner, self, frame)?; + } else { + centralized::process_response_frame(inner, self, frame)?; + } + } else { + continue; + } + } + + Ok(()) + } + + /// Read frames until the in-flight buffer is empty, dropping any non-pubsub frames. + pub async fn skip_results(&mut self, inner: &RefCount) -> Result<(), Error> { + while !self.buffer.is_empty() { + if self.read_skip_pubsub(inner).await?.is_none() { + return Ok(()); + } } - self.buffer.drain() + + Ok(()) + } + + /// Flush the sink and reset the feed counter. + pub async fn flush(&mut self) -> Result<(), Error> { + trace!("Flushing socket to {}", self.server); + self.transport.flush().await?; + self.counters.reset_feed_count(); + Ok(()) } - /// Gracefully close the connection and wait for the reader task to finish. + /// Close the connection. /// /// Returns the in-flight commands that had not received a response. - pub async fn graceful_close(mut self) -> CommandBuffer { + pub async fn close(&mut self) -> VecDeque { let _ = utils::timeout( - async { - let _ = self.sink.close().await; - if let Some(mut reader) = self.reader { - let _ = reader.wait().await; - } - - Ok::<_, RedisError>(()) - }, + self.transport.close(), Duration::from_millis(CONNECTION_CLOSE_TIMEOUT_MS), ) .await; - self.buffer.drain() + self.buffer.drain(..).collect() + } +} + +/// Send a command and wait on the response. +/// +/// The connection's in-flight command queue must be empty or drained before calling this. 
+#[cfg(any(feature = "replicas", feature = "transactions"))] +pub async fn request_response( + inner: &RefCount, + conn: &mut Connection, + command: Command, + timeout: Option, +) -> Result { + let timeout_dur = timeout + .or(command.timeout_dur) + .unwrap_or_else(|| inner.default_command_timeout()); + + _trace!( + inner, + "Sending {} ({}) to {}", + command.kind.to_str_debug(), + command.debug_id(), + conn.server + ); + let frame = protocol_utils::encode_frame(inner, &command)?; + + let check_unresponsive = !command.kind.is_pubsub() && inner.has_unresponsive_duration(); + let ft = async { + conn.write(frame, true, check_unresponsive).await?; + conn.flush().await?; + match conn.read_skip_pubsub(inner).await { + Ok(Some(f)) => Ok(f), + Ok(None) => Err(Error::new(ErrorKind::Unknown, "Missing response.")), + Err(e) => Err(e), + } + }; + if timeout_dur.is_zero() { + ft.await + } else { + utils::timeout(ft, timeout_dur).await } } +#[cfg(feature = "replicas")] +pub async fn discover_replicas(inner: &RefCount, conn: &mut Connection) -> Result, Error> { + utils::timeout(conn.drain(inner), inner.internal_command_timeout()).await?; + + let command = Command::new(CommandKind::Role, vec![]); + let role = request_response(inner, conn, command, None) + .await + .and_then(protocol_utils::frame_to_results)?; + + protocol_utils::parse_master_role_replicas(role) +} + /// Create a connection to the specified `host` and `port` with the provided timeout, in ms. /// /// The returned connection will not be initialized. 
pub async fn create( - inner: &RefCount, + inner: &RefCount, server: &Server, timeout: Option, -) -> Result { +) -> Result { let timeout = timeout.unwrap_or(inner.connection_timeout()); _trace!( @@ -1129,89 +973,14 @@ pub async fn create( inner.config.uses_rustls(), ); if inner.config.uses_native_tls() { - utils::timeout(RedisTransport::new_native_tls(inner, server), timeout).await + utils::timeout(ExclusiveConnection::new_native_tls(inner, server), timeout).await } else if inner.config.uses_rustls() { - utils::timeout(RedisTransport::new_rustls(inner, server), timeout).await + utils::timeout(ExclusiveConnection::new_rustls(inner, server), timeout).await } else { match inner.config.server { #[cfg(feature = "unix-sockets")] - ServerConfig::Unix { ref path } => utils::timeout(RedisTransport::new_unix(inner, path), timeout).await, - _ => utils::timeout(RedisTransport::new_tcp(inner, server), timeout).await, + ServerConfig::Unix { ref path } => utils::timeout(ExclusiveConnection::new_unix(inner, path), timeout).await, + _ => utils::timeout(ExclusiveConnection::new_tcp(inner, server), timeout).await, } } } - -/// Split a connection, spawn a reader task, register the connection on `inner`, and link the reader and writer -/// halves. 
-pub fn split( - inner: &RefCount, - transport: RedisTransport, - is_replica: bool, - func: F, -) -> Result<(Server, RedisWriter), RedisError> -where - F: FnOnce( - &RefCount, - SplitStreamKind, - &Server, - &SharedBuffer, - &Counters, - bool, - ) -> JoinHandle>, -{ - let server = transport.server.clone(); - let (mut writer, mut reader) = transport.split(); - let reader_stream = match reader.stream.take() { - Some(stream) => stream, - None => { - return Err(RedisError::new( - RedisErrorKind::Unknown, - "Missing clustered connection reader stream.", - )) - }, - }; - reader.task = Some(func( - inner, - reader_stream, - &writer.server, - &writer.buffer, - &writer.counters, - is_replica, - )); - writer.reader = Some(reader); - inner.add_connection(&server); - - Ok((server, writer)) -} - -/// Send a command to the server and wait for a response. -#[cfg(feature = "replicas")] -pub async fn request_response( - inner: &RefCount, - writer: &mut RedisWriter, - mut command: RedisCommand, - timeout: Option, -) -> Result { - let (tx, rx) = oneshot_channel(); - command.response = ResponseKind::Respond(Some(tx)); - let timeout_dur = timeout - .or(command.timeout_dur) - .unwrap_or_else(|| inner.default_command_timeout()); - - _trace!( - inner, - "Sending {} ({}) to {}", - command.kind.to_str_debug(), - command.debug_id(), - writer.server - ); - let frame = protocol_utils::encode_frame(inner, &command)?; - - if !writer.is_working() { - return Err(RedisError::new(RedisErrorKind::IO, "Connection closed.")); - } - - writer.push_command(inner, command); - writer.write_frame(frame, true, false).await?; - utils::timeout(async { rx.await? 
}, timeout_dur).await -} diff --git a/src/protocol/debug.rs b/src/protocol/debug.rs index ee718bd1..8d504301 100644 --- a/src/protocol/debug.rs +++ b/src/protocol/debug.rs @@ -1,4 +1,7 @@ -use redis_protocol::{resp2::types::BytesFrame as Resp2Frame, resp3::types::BytesFrame as Resp3Frame}; +use redis_protocol::{ + resp2::types::{BorrowedFrame as Resp2BorrowedFrame, BytesFrame as Resp2Frame}, + resp3::types::{BorrowedFrame as Resp3BorrowedFrame, BytesFrame as Resp3Frame}, +}; use std::{ collections::{HashMap, HashSet}, hash::{Hash, Hasher}, @@ -51,6 +54,19 @@ fn bytes_or_string(b: &[u8]) -> DebugFrame { } } +impl<'a> From<&'a Resp2BorrowedFrame<'a>> for DebugFrame { + fn from(f: &'a Resp2BorrowedFrame<'a>) -> Self { + match f { + Resp2BorrowedFrame::Error(s) => DebugFrame::String(s.to_string()), + Resp2BorrowedFrame::SimpleString(s) => bytes_or_string(s), + Resp2BorrowedFrame::Integer(i) => DebugFrame::Integer(*i), + Resp2BorrowedFrame::BulkString(b) => bytes_or_string(b), + Resp2BorrowedFrame::Null => DebugFrame::String("nil".into()), + Resp2BorrowedFrame::Array(frames) => DebugFrame::Array(frames.iter().map(|f| f.into()).collect()), + } + } +} + impl<'a> From<&'a Resp2Frame> for DebugFrame { fn from(f: &'a Resp2Frame) -> Self { match f { @@ -67,11 +83,9 @@ impl<'a> From<&'a Resp2Frame> for DebugFrame { impl<'a> From<&'a Resp3Frame> for DebugFrame { fn from(frame: &'a Resp3Frame) -> Self { match frame { - Resp3Frame::Map { ref data, .. } => DebugFrame::Array(data.iter().fold(vec![], |mut memo, (key, value)| { - memo.push(key.into()); - memo.push(value.into()); - memo - })), + Resp3Frame::Map { ref data, .. } => { + DebugFrame::Array(data.iter().flat_map(|(k, v)| vec![k.into(), v.into()]).collect()) + }, Resp3Frame::Set { ref data, .. } => DebugFrame::Array(data.iter().map(|d| d.into()).collect()), Resp3Frame::Array { ref data, .. } => DebugFrame::Array(data.iter().map(|d| d.into()).collect()), Resp3Frame::BlobError { ref data, .. 
} => bytes_or_string(data), @@ -100,6 +114,41 @@ impl<'a> From<&'a Resp3Frame> for DebugFrame { } } +impl<'a> From<&'a Resp3BorrowedFrame<'a>> for DebugFrame { + fn from(frame: &'a Resp3BorrowedFrame<'a>) -> Self { + match frame { + Resp3BorrowedFrame::Map { data, .. } => { + DebugFrame::Array(data.iter().flat_map(|(k, v)| vec![k.into(), v.into()]).collect()) + }, + Resp3BorrowedFrame::Set { data, .. } => DebugFrame::Array(data.iter().map(|d| d.into()).collect()), + Resp3BorrowedFrame::Array { data, .. } => DebugFrame::Array(data.iter().map(|d| d.into()).collect()), + Resp3BorrowedFrame::BlobError { data, .. } => bytes_or_string(data), + Resp3BorrowedFrame::BlobString { data, .. } => bytes_or_string(data), + Resp3BorrowedFrame::SimpleString { data, .. } => bytes_or_string(data), + Resp3BorrowedFrame::SimpleError { ref data, .. } => DebugFrame::String(data.to_string()), + Resp3BorrowedFrame::Double { ref data, .. } => DebugFrame::Double(*data), + Resp3BorrowedFrame::BigNumber { data, .. } => bytes_or_string(data), + Resp3BorrowedFrame::Number { ref data, .. } => DebugFrame::Integer(*data), + Resp3BorrowedFrame::Boolean { ref data, .. } => DebugFrame::String(data.to_string()), + Resp3BorrowedFrame::Null => DebugFrame::String("nil".into()), + Resp3BorrowedFrame::Push { data, .. } => DebugFrame::Array(data.iter().map(|d| d.into()).collect()), + Resp3BorrowedFrame::ChunkedString(data) => bytes_or_string(data), + Resp3BorrowedFrame::VerbatimString { data, .. } => bytes_or_string(data), + Resp3BorrowedFrame::Hello { + ref version, ref auth, .. 
+ } => { + let mut values = vec![DebugFrame::Integer(version.to_byte() as i64)]; + if let Some((ref username, ref password)) = auth { + values.push(DebugFrame::String(username.to_string())); + values.push(DebugFrame::String(password.to_string())); + } + DebugFrame::Array(values) + }, + } + } +} + +// TODO clean this up pub fn log_resp2_frame(name: &str, frame: &Resp2Frame, encode: bool) { let prefix = if encode { "Encoded" } else { "Decoded" }; trace!("{}: {} {:?}", name, prefix, DebugFrame::from(frame)) diff --git a/src/protocol/hashers.rs b/src/protocol/hashers.rs index e3d13564..c8c52fe5 100644 --- a/src/protocol/hashers.rs +++ b/src/protocol/hashers.rs @@ -1,28 +1,28 @@ -use crate::types::RedisValue; +use crate::types::Value; use redis_protocol::redis_keyslot; -pub fn hash_value(value: &RedisValue) -> Option { +pub fn hash_value(value: &Value) -> Option { Some(match value { - RedisValue::String(s) => redis_keyslot(s.as_bytes()), - RedisValue::Bytes(b) => redis_keyslot(b), - RedisValue::Integer(i) => redis_keyslot(i.to_string().as_bytes()), - RedisValue::Double(f) => redis_keyslot(f.to_string().as_bytes()), - RedisValue::Null => redis_keyslot(b"nil"), - RedisValue::Boolean(b) => redis_keyslot(b.to_string().as_bytes()), + Value::String(s) => redis_keyslot(s.as_bytes()), + Value::Bytes(b) => redis_keyslot(b), + Value::Integer(i) => redis_keyslot(i.to_string().as_bytes()), + Value::Double(f) => redis_keyslot(f.to_string().as_bytes()), + Value::Null => redis_keyslot(b"nil"), + Value::Boolean(b) => redis_keyslot(b.to_string().as_bytes()), _ => return None, }) } -pub fn read_redis_key(value: &RedisValue) -> Option<&[u8]> { +pub fn read_key(value: &Value) -> Option<&[u8]> { match value { - RedisValue::String(s) => Some(s.as_bytes()), - RedisValue::Bytes(b) => Some(b), + Value::String(s) => Some(s.as_bytes()), + Value::Bytes(b) => Some(b), _ => None, } } -fn hash_key(value: &RedisValue) -> Option { - read_redis_key(value).map(redis_keyslot) +fn hash_key(value: &Value) 
-> Option { + read_key(value).map(redis_keyslot) } /// A cluster hashing policy. @@ -75,7 +75,7 @@ impl From<&[u8]> for ClusterHash { impl ClusterHash { /// Hash the provided arguments. - pub fn hash(&self, args: &[RedisValue]) -> Option { + pub fn hash(&self, args: &[Value]) -> Option { match self { ClusterHash::FirstValue => args.first().and_then(hash_value), ClusterHash::FirstKey => args.iter().find_map(hash_key), @@ -86,11 +86,11 @@ impl ClusterHash { } /// Find the key to hash with the provided arguments. - pub fn find_key<'a>(&self, args: &'a [RedisValue]) -> Option<&'a [u8]> { + pub fn find_key<'a>(&self, args: &'a [Value]) -> Option<&'a [u8]> { match self { - ClusterHash::FirstValue => args.first().and_then(read_redis_key), - ClusterHash::FirstKey => args.iter().find_map(read_redis_key), - ClusterHash::Offset(idx) => args.get(*idx).and_then(read_redis_key), + ClusterHash::FirstValue => args.first().and_then(read_key), + ClusterHash::FirstKey => args.iter().find_map(read_key), + ClusterHash::Offset(idx) => args.get(*idx).and_then(read_key), ClusterHash::Random | ClusterHash::Custom(_) => None, } } diff --git a/src/protocol/public.rs b/src/protocol/public.rs deleted file mode 100644 index 856d326f..00000000 --- a/src/protocol/public.rs +++ /dev/null @@ -1,210 +0,0 @@ -use crate::error::{RedisError, RedisErrorKind}; -use bytes::BytesMut; -use redis_protocol::{ - resp2::{decode::decode_mut as resp2_decode, encode::encode_bytes as resp2_encode}, - resp3::{ - decode::streaming::decode_mut as resp3_decode, - encode::complete::encode_bytes as resp3_encode, - types::StreamedFrame, - }, -}; -use tokio_util::codec::{Decoder, Encoder}; - -pub use redis_protocol::{ - redis_keyslot, - resp2::types::{Frame as Resp2Frame, FrameKind as Resp2FrameKind}, - resp2_frame_to_resp3, - resp3::types::{Auth, Frame as Resp3Frame, FrameKind as Resp3FrameKind, RespVersion}, -}; - -/// Encode a redis command string (`SET foo bar NX`, etc) into a RESP3 blob string array. 
-pub fn resp3_encode_command(cmd: &str) -> Resp3Frame { - Resp3Frame::Array { - data: cmd - .split(' ') - .map(|s| Resp3Frame::BlobString { - data: s.as_bytes().to_vec().into(), - attributes: None, - }) - .collect(), - attributes: None, - } -} - -/// Encode a redis command string (`SET foo bar NX`, etc) into a RESP2 bulk string array. -pub fn resp2_encode_command(cmd: &str) -> Resp2Frame { - Resp2Frame::Array( - cmd - .split(' ') - .map(|s| Resp2Frame::BulkString(s.as_bytes().to_vec().into())) - .collect(), - ) -} - -/// A framed RESP2 codec. -/// -/// ```rust -/// use fred::{ -/// codec::{resp2_encode_command, Resp2, Resp2Frame}, -/// prelude::*, -/// }; -/// use futures::{SinkExt, StreamExt}; -/// use tokio::net::TcpStream; -/// use tokio_util::codec::Framed; -/// -/// async fn example() -> Result<(), RedisError> { -/// let socket = TcpStream::connect("127.0.0.1:6379").await?; -/// let mut framed = Framed::new(socket, Resp2::default()); -/// -/// let auth = resp2_encode_command("AUTH foo bar"); -/// let get_foo = resp2_encode_command("GET foo"); -/// -/// let _ = framed.send(auth).await?; -/// let response = framed.next().await.unwrap().unwrap(); -/// assert_eq!(response.as_str().unwrap(), "OK"); -/// -/// let _ = framed.send(get_foo).await?; -/// let response = framed.next().await.unwrap().unwrap(); -/// assert_eq!(response, Resp2Frame::Null); -/// -/// Ok(()) -/// } -/// ``` -#[derive(Default)] -pub struct Resp2; - -impl Encoder for Resp2 { - type Error = RedisError; - - fn encode(&mut self, item: Resp2Frame, dst: &mut BytesMut) -> Result<(), Self::Error> { - #[cfg(feature = "network-logs")] - trace!("RESP2 codec encode: {:?}", item); - - resp2_encode(dst, &item).map(|_| ()).map_err(RedisError::from) - } -} - -impl Decoder for Resp2 { - type Error = RedisError; - type Item = Resp2Frame; - - fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - if src.is_empty() { - return Ok(None); - } - let parsed = match resp2_decode(src)? 
{ - Some((frame, _, _)) => frame, - None => return Ok(None), - }; - #[cfg(feature = "network-logs")] - trace!("RESP2 codec decode: {:?}", parsed); - - Ok(Some(parsed)) - } -} - -/// A framed codec for complete and streaming/chunked RESP3 frames with optional attributes. -/// -/// ```rust -/// use fred::{ -/// codec::{resp3_encode_command, Auth, Resp3, Resp3Frame, RespVersion}, -/// prelude::*, -/// }; -/// use futures::{SinkExt, StreamExt}; -/// use tokio::net::TcpStream; -/// use tokio_util::codec::Framed; -/// -/// // send `HELLO 3 AUTH foo bar` then `GET foo` -/// async fn example() -> Result<(), RedisError> { -/// let socket = TcpStream::connect("127.0.0.1:6379").await?; -/// let mut framed = Framed::new(socket, Resp3::default()); -/// -/// let hello = Resp3Frame::Hello { -/// version: RespVersion::RESP3, -/// auth: Some(Auth { -/// username: "foo".into(), -/// password: "bar".into(), -/// }), -/// }; -/// // or use the shorthand, but this likely only works for simple use cases -/// let get_foo = resp3_encode_command("GET foo"); -/// -/// // `Framed` implements both `Sink` and `Stream` -/// let _ = framed.send(hello).await?; -/// let response = framed.next().await; -/// println!("HELLO response: {:?}", response); -/// -/// let _ = framed.send(get_foo).await?; -/// let response = framed.next().await; -/// println!("GET foo: {:?}", response); -/// -/// Ok(()) -/// } -/// ``` -#[derive(Default)] -pub struct Resp3 { - streaming: Option, -} - -impl Encoder for Resp3 { - type Error = RedisError; - - fn encode(&mut self, item: Resp3Frame, dst: &mut BytesMut) -> Result<(), Self::Error> { - #[cfg(feature = "network-logs")] - trace!("RESP3 codec encode: {:?}", item); - - resp3_encode(dst, &item).map(|_| ()).map_err(RedisError::from) - } -} - -impl Decoder for Resp3 { - type Error = RedisError; - type Item = Resp3Frame; - - // FIXME ideally this would refer to the corresponding fn in codec.rs, but that code is too tightly coupled to the - // private inner interface to 
expose here - fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - if src.is_empty() { - return Ok(None); - } - let parsed = match resp3_decode(src)? { - Some((f, _, _)) => f, - None => return Ok(None), - }; - - if self.streaming.is_some() && parsed.is_streaming() { - return Err(RedisError::new( - RedisErrorKind::Protocol, - "Cannot start a stream while already inside a stream.", - )); - } - - let result = if let Some(ref mut state) = self.streaming { - // we started receiving streamed data earlier - state.add_frame(parsed.into_complete_frame()?); - - if state.is_finished() { - Some(state.into_frame()?) - } else { - None - } - } else { - // we're processing a complete frame or starting a new streamed frame - if parsed.is_streaming() { - self.streaming = Some(parsed.into_streaming_frame()?); - None - } else { - // we're not in the middle of a stream and we found a complete frame - Some(parsed.into_complete_frame()?) - } - }; - - if result.is_some() { - let _ = self.streaming.take(); - } - - #[cfg(feature = "network-logs")] - trace!("RESP3 codec decode: {:?}", result); - Ok(result) - } -} diff --git a/src/protocol/responders.rs b/src/protocol/responders.rs index 489fa527..7c5cd9ec 100644 --- a/src/protocol/responders.rs +++ b/src/protocol/responders.rs @@ -1,20 +1,24 @@ use crate::{ - error::{RedisError, RedisErrorKind}, + error::{Error, ErrorKind}, interfaces, interfaces::Resp3Frame, - modules::inner::RedisClientInner, + modules::inner::ClientInner, protocol::{ - command::{RedisCommand, RedisCommandKind, ResponseSender, RouterResponse}, + command::{Command, CommandKind, ResponseSender}, types::{KeyScanBufferedInner, KeyScanInner, Server, ValueScanInner, ValueScanResult}, utils as protocol_utils, }, runtime::{AtomicUsize, Mutex, RefCount}, - types::{HScanResult, RedisKey, RedisValue, SScanResult, ScanResult, ZScanResult}, + types::{ + scan::{HScanResult, SScanResult, ScanResult, ZScanResult}, + Key, + Value, + }, utils as client_utils, }; use 
bytes_utils::Str; use redis_protocol::resp3::types::{FrameKind, Resp3Frame as _Resp3Frame}; -use std::{fmt, fmt::Formatter, iter::repeat, mem, ops::DerefMut}; +use std::{fmt, fmt::Formatter, mem, ops::DerefMut}; #[cfg(feature = "metrics")] use crate::modules::metrics::MovingStats; @@ -28,8 +32,6 @@ const LAST_CURSOR: &str = "0"; pub enum ResponseKind { /// Throw away the response frame and last command in the command buffer. /// - /// Note: The reader task will still unblock the router, if specified. - /// /// Equivalent to `Respond(None)`. Skip, /// Respond to the caller of the last command with the response frame. @@ -129,9 +131,8 @@ impl ResponseKind { } pub fn new_buffer_with_size(expected: usize, tx: ResponseSender) -> Self { - let frames = repeat(Resp3Frame::Null).take(expected).collect(); ResponseKind::Buffer { - frames: RefCount::new(Mutex::new(frames)), + frames: RefCount::new(Mutex::new(vec![Resp3Frame::Null; expected])), tx: RefCount::new(Mutex::new(Some(tx))), received: RefCount::new(AtomicUsize::new(0)), index: 0, @@ -158,7 +159,7 @@ impl ResponseKind { } /// Respond with an error to the caller. - pub fn respond_with_error(&mut self, error: RedisError) { + pub fn respond_with_error(&mut self, error: Error) { if let Some(tx) = self.take_response_tx() { let _ = tx.send(Err(error)); } @@ -188,7 +189,7 @@ fn sample_latency(latency_stats: &RwLock, sent: Instant) { /// Sample overall and network latency values for a command. 
#[cfg(feature = "metrics")] -fn sample_command_latencies(inner: &RefCount, command: &mut RedisCommand) { +pub fn sample_command_latencies(inner: &RefCount, command: &mut Command) { if let Some(sent) = command.network_start.take() { sample_latency(&inner.network_latency_stats, sent); } @@ -196,27 +197,27 @@ fn sample_command_latencies(inner: &RefCount, command: &mut Re } #[cfg(not(feature = "metrics"))] -fn sample_command_latencies(_: &RefCount, _: &mut RedisCommand) {} +pub fn sample_command_latencies(_: &RefCount, _: &mut Command) {} /// Update the client's protocol version codec version after receiving a non-error response to HELLO. -fn update_protocol_version(inner: &RefCount, command: &RedisCommand, frame: &Resp3Frame) { +fn update_protocol_version(inner: &RefCount, command: &Command, frame: &Resp3Frame) { if !matches!(frame.kind(), FrameKind::SimpleError | FrameKind::BlobError) { let version = match command.kind { - RedisCommandKind::_Hello(ref version) => version, - RedisCommandKind::_HelloAllCluster(ref version) => version, + CommandKind::_Hello(ref version) => version, + CommandKind::_HelloAllCluster(ref version) => version, _ => return, }; _debug!(inner, "Changing RESP version to {:?}", version); - // HELLO cannot be pipelined so this is safe + // HELLO is not pipelined so this is safe inner.switch_protocol_versions(version.clone()); } } fn respond_locked( - inner: &RefCount, + inner: &RefCount, tx: &RefCount>>, - result: Result, + result: Result, ) { if let Some(tx) = tx.lock().take() { if let Err(_) = tx.send(result) { @@ -225,26 +226,18 @@ fn respond_locked( } } -fn add_buffered_frame( +/// Add the provided frame to the response buffer. 
+fn buffer_frame( server: &Server, buffer: &RefCount>>, index: usize, frame: Resp3Frame, -) -> Result<(), RedisError> { +) -> Result<(), Error> { let mut guard = buffer.lock(); let buffer_ref = guard.deref_mut(); if index >= buffer_ref.len() { - debug!( - "({}) Unexpected buffer response array index: {}, len: {}", - server, - index, - buffer_ref.len() - ); - return Err(RedisError::new( - RedisErrorKind::Unknown, - "Invalid buffer response index.", - )); + return Err(Error::new(ErrorKind::Unknown, "Invalid buffer response index.")); } trace!( @@ -275,14 +268,14 @@ fn merge_multiple_frames(frames: &mut Vec, error_early: bool) -> Res } /// Parse the output of a command that scans keys. -fn parse_key_scan_frame(frame: Resp3Frame) -> Result<(Str, Vec), RedisError> { +fn parse_key_scan_frame(frame: Resp3Frame) -> Result<(Str, Vec), Error> { if let Resp3Frame::Array { mut data, .. } = frame { if data.len() == 2 { - let cursor = match protocol_utils::frame_to_str(&data[0]) { + let cursor = match protocol_utils::frame_to_str(data[0].clone()) { Some(s) => s, None => { - return Err(RedisError::new( - RedisErrorKind::Protocol, + return Err(Error::new( + ErrorKind::Protocol, "Expected first SCAN result element to be a bulk string.", )) }, @@ -292,11 +285,11 @@ fn parse_key_scan_frame(frame: Resp3Frame) -> Result<(Str, Vec), Redis let mut keys = Vec::with_capacity(data.len()); for frame in data.into_iter() { - let key = match protocol_utils::frame_to_bytes(&frame) { + let key = match protocol_utils::frame_to_bytes(frame) { Some(s) => s, None => { - return Err(RedisError::new( - RedisErrorKind::Protocol, + return Err(Error::new( + ErrorKind::Protocol, "Expected an array of strings from second SCAN result.", )) }, @@ -307,34 +300,31 @@ fn parse_key_scan_frame(frame: Resp3Frame) -> Result<(Str, Vec), Redis Ok((cursor, keys)) } else { - Err(RedisError::new( - RedisErrorKind::Protocol, + Err(Error::new( + ErrorKind::Protocol, "Expected second SCAN result element to be an array.", )) 
} } else { - Err(RedisError::new( - RedisErrorKind::Protocol, + Err(Error::new( + ErrorKind::Protocol, "Expected two-element bulk string array from SCAN.", )) } } else { - Err(RedisError::new( - RedisErrorKind::Protocol, - "Expected bulk string array from SCAN.", - )) + Err(Error::new(ErrorKind::Protocol, "Expected bulk string array from SCAN.")) } } /// Parse the output of a command that scans values. -fn parse_value_scan_frame(frame: Resp3Frame) -> Result<(Str, Vec), RedisError> { +fn parse_value_scan_frame(frame: Resp3Frame) -> Result<(Str, Vec), Error> { if let Resp3Frame::Array { mut data, .. } = frame { if data.len() == 2 { - let cursor = match protocol_utils::frame_to_str(&data[0]) { + let cursor = match protocol_utils::frame_to_str(data[0].clone()) { Some(s) => s, None => { - return Err(RedisError::new( - RedisErrorKind::Protocol, + return Err(Error::new( + ErrorKind::Protocol, "Expected first result element to be a bulk string.", )) }, @@ -349,32 +339,32 @@ fn parse_value_scan_frame(frame: Resp3Frame) -> Result<(Str, Vec), R Ok((cursor, values)) } else { - Err(RedisError::new( - RedisErrorKind::Protocol, + Err(Error::new( + ErrorKind::Protocol, "Expected second result element to be an array.", )) } } else { - Err(RedisError::new( - RedisErrorKind::Protocol, + Err(Error::new( + ErrorKind::Protocol, "Expected two-element bulk string array.", )) } } else { - Err(RedisError::new(RedisErrorKind::Protocol, "Expected bulk string array.")) + Err(Error::new(ErrorKind::Protocol, "Expected bulk string array.")) } } /// Send the output to the caller of a command that scans values. 
fn send_value_scan_result( - inner: &RefCount, + inner: &RefCount, scanner: ValueScanInner, - command: &RedisCommand, - result: Vec, + command: &Command, + result: Vec, can_continue: bool, -) -> Result<(), RedisError> { +) -> Result<(), Error> { match command.kind { - RedisCommandKind::Zscan => { + CommandKind::Zscan => { let tx = scanner.tx.clone(); let results = ValueScanInner::transform_zscan_result(result)?; @@ -385,11 +375,11 @@ fn send_value_scan_result( results: Some(results), }); - if let Err(_) = tx.send(Ok(state)) { + if let Err(_) = tx.try_send(Ok(state)) { _warn!(inner, "Failed to send ZSCAN result to caller"); } }, - RedisCommandKind::Sscan => { + CommandKind::Sscan => { let tx = scanner.tx.clone(); let state = ValueScanResult::SScan(SScanResult { @@ -399,11 +389,11 @@ fn send_value_scan_result( results: Some(result), }); - if let Err(_) = tx.send(Ok(state)) { + if let Err(_) = tx.try_send(Ok(state)) { _warn!(inner, "Failed to send SSCAN result to caller"); } }, - RedisCommandKind::Hscan => { + CommandKind::Hscan => { let tx = scanner.tx.clone(); let results = ValueScanInner::transform_hscan_result(result)?; @@ -414,13 +404,13 @@ fn send_value_scan_result( results: Some(results), }); - if let Err(_) = tx.send(Ok(state)) { + if let Err(_) = tx.try_send(Ok(state)) { _warn!(inner, "Failed to send HSCAN result to caller"); } }, _ => { - return Err(RedisError::new( - RedisErrorKind::Unknown, + return Err(Error::new( + ErrorKind::Unknown, "Invalid redis command. Expected HSCAN, SSCAN, or ZSCAN.", )) }, @@ -431,12 +421,12 @@ fn send_value_scan_result( /// Respond to the caller with the default response policy. 
pub fn respond_to_caller( - inner: &RefCount, + inner: &RefCount, server: &Server, - mut command: RedisCommand, + mut command: Command, tx: ResponseSender, frame: Resp3Frame, -) -> Result<(), RedisError> { +) -> Result<(), Error> { sample_command_latencies(inner, &mut command); _trace!( inner, @@ -450,16 +440,15 @@ pub fn respond_to_caller( } let _ = tx.send(Ok(frame)); - command.respond_to_router(inner, RouterResponse::Continue); Ok(()) } /// Respond to the caller, assuming multiple response frames from the last command, storing intermediate responses in /// the shared buffer. pub fn respond_buffer( - inner: &RefCount, + inner: &RefCount, server: &Server, - command: RedisCommand, + command: Command, received: RefCount, expected: usize, error_early: bool, @@ -467,7 +456,7 @@ pub fn respond_buffer( index: usize, tx: RefCount>>, frame: Resp3Frame, -) -> Result<(), RedisError> { +) -> Result<(), Error> { _trace!( inner, "Handling `buffer` response from {} for {}. kind {:?}, Index: {}, ID: {}", @@ -480,15 +469,13 @@ pub fn respond_buffer( let closes_connection = command.kind.closes_connection(); // errors are buffered like normal frames and are not returned early - if let Err(e) = add_buffered_frame(server, &frames, index, frame) { + if let Err(e) = buffer_frame(server, &frames, index, frame) { if closes_connection { _debug!(inner, "Ignoring unexpected buffer response index from QUIT or SHUTDOWN"); - respond_locked(inner, &tx, Err(RedisError::new_canceled())); - command.respond_to_router(inner, RouterResponse::Continue); - return Err(RedisError::new_canceled()); + respond_locked(inner, &tx, Err(Error::new_canceled())); + return Err(Error::new_canceled()); } else { respond_locked(inner, &tx, Err(e)); - command.respond_to_router(inner, RouterResponse::Continue); _error!( inner, "Exiting early after unexpected buffer response index from {} with command {}, ID {}", @@ -496,16 +483,10 @@ pub fn respond_buffer( command.kind.to_str_debug(), command.debug_id() ); - return 
Err(RedisError::new( - RedisErrorKind::Unknown, - "Invalid buffer response index.", - )); + return Err(Error::new(ErrorKind::Unknown, "Invalid buffer response index.")); } } - // this must come after adding the buffered frame. there's a potential race condition if this task is interrupted - // due to contention on the frame lock and another parallel task moves past the `received==expected` check before - // this task can add the frame to the buffer. let received = client_utils::incr_atomic(&received); if received == expected { _trace!( @@ -519,24 +500,19 @@ pub fn respond_buffer( if matches!(frame.kind(), FrameKind::SimpleError | FrameKind::BlobError) { let err = match frame.as_str() { Some(s) => protocol_utils::pretty_error(s), - None => RedisError::new( - RedisErrorKind::Unknown, - "Unknown or invalid error from buffered frames.", - ), + None => Error::new(ErrorKind::Unknown, "Unknown or invalid error from buffered frames."), }; respond_locked(inner, &tx, Err(err)); } else { respond_locked(inner, &tx, Ok(frame)); } - command.respond_to_router(inner, RouterResponse::Continue); } else { - // more responses are expected _trace!( inner, - "Waiting on {} more responses to all nodes command, ID: {}", + "({}) Waiting on {} more responses", + command.debug_id(), expected - received, - command.debug_id() ); // this response type is shared across connections so we do not return the command to be re-queued } @@ -546,12 +522,12 @@ pub fn respond_buffer( /// Respond to the caller of a key scanning operation. 
pub fn respond_key_scan( - inner: &RefCount, + inner: &RefCount, server: &Server, - command: RedisCommand, + command: Command, mut scanner: KeyScanInner, frame: Resp3Frame, -) -> Result<(), RedisError> { +) -> Result<(), Error> { _trace!( inner, "Handling `KeyScan` response from {} for {}", @@ -562,14 +538,12 @@ pub fn respond_key_scan( Ok(result) => result, Err(e) => { scanner.send_error(e); - command.respond_to_router(inner, RouterResponse::Continue); return Ok(()); }, }; let scan_stream = scanner.tx.clone(); let can_continue = next_cursor != LAST_CURSOR; scanner.update_cursor(next_cursor); - command.respond_to_router(inner, RouterResponse::Continue); let scan_result = ScanResult { scan_state: Some(scanner), @@ -577,7 +551,7 @@ pub fn respond_key_scan( results: Some(keys), can_continue, }; - if let Err(_) = scan_stream.send(Ok(scan_result)) { + if let Err(_) = scan_stream.try_send(Ok(scan_result)) { _debug!(inner, "Error sending SCAN page."); } @@ -585,12 +559,12 @@ pub fn respond_key_scan( } pub fn respond_key_scan_buffered( - inner: &RefCount, + inner: &RefCount, server: &Server, - command: RedisCommand, + command: Command, mut scanner: KeyScanBufferedInner, frame: Resp3Frame, -) -> Result<(), RedisError> { +) -> Result<(), Error> { _trace!( inner, "Handling `KeyScanBuffered` response from {} for {}", @@ -602,40 +576,38 @@ pub fn respond_key_scan_buffered( Ok(result) => result, Err(e) => { scanner.send_error(e); - command.respond_to_router(inner, RouterResponse::Continue); return Ok(()); }, }; let scan_stream = scanner.tx.clone(); let can_continue = next_cursor != LAST_CURSOR; scanner.update_cursor(next_cursor); - command.respond_to_router(inner, RouterResponse::Continue); for key in keys.into_iter() { - if let Err(_) = scan_stream.send(Ok(key)) { + if let Err(_) = scan_stream.try_send(Ok(key)) { _debug!(inner, "Error sending SCAN key."); break; } } - if can_continue { - let mut command = RedisCommand::new(RedisCommandKind::Scan, Vec::new()); + let mut command 
= Command::new(CommandKind::Scan, Vec::new()); command.response = ResponseKind::KeyScanBuffered(scanner); if let Err(e) = interfaces::default_send_command(inner, command) { - let _ = scan_stream.send(Err(e)); + let _ = scan_stream.try_send(Err(e)); }; } + Ok(()) } /// Respond to the caller of a value scanning operation. pub fn respond_value_scan( - inner: &RefCount, + inner: &RefCount, server: &Server, - command: RedisCommand, + command: Command, mut scanner: ValueScanInner, frame: Resp3Frame, -) -> Result<(), RedisError> { +) -> Result<(), Error> { _trace!( inner, "Handling `ValueScan` response from {} for {}", @@ -647,18 +619,16 @@ pub fn respond_value_scan( Ok(result) => result, Err(e) => { scanner.send_error(e); - command.respond_to_router(inner, RouterResponse::Continue); return Ok(()); }, }; let scan_stream = scanner.tx.clone(); let can_continue = next_cursor != LAST_CURSOR; scanner.update_cursor(next_cursor); - command.respond_to_router(inner, RouterResponse::Continue); _trace!(inner, "Sending value scan result with {} values", values.len()); if let Err(e) = send_value_scan_result(inner, scanner, &command, values, can_continue) { - if let Err(_) = scan_stream.send(Err(e)) { + if let Err(_) = scan_stream.try_send(Err(e)) { _warn!(inner, "Error sending scan result."); } } diff --git a/src/protocol/tls.rs b/src/protocol/tls.rs index 99c7b622..3b3ad0e4 100644 --- a/src/protocol/tls.rs +++ b/src/protocol/tls.rs @@ -1,4 +1,4 @@ -use crate::error::RedisError; +use crate::error::{Error, ErrorKind}; use std::{ fmt, fmt::{Debug, Formatter}, @@ -6,8 +6,6 @@ use std::{ sync::Arc, }; -#[cfg(feature = "enable-native-tls")] -use crate::error::RedisErrorKind; #[cfg(feature = "enable-native-tls")] use std::convert::{TryFrom, TryInto}; #[cfg(feature = "enable-native-tls")] @@ -84,7 +82,7 @@ impl Eq for TlsHostMapping {} /// Note: the `hostnames` field is only necessary to use with certain clustered deployments. 
/// /// ```rust no_run -/// # use fred::types::*; +/// # use fred::types::config::*; /// let config = TlsConfig { /// // or use `TlsConnector::default_rustls()` /// connector: TlsConnector::default_native_tls().unwrap(), @@ -151,17 +149,24 @@ impl TlsConnector { /// Create a default TLS connector from the `native-tls` module. #[cfg(feature = "enable-native-tls")] #[cfg_attr(docsrs, doc(cfg(feature = "enable-native-tls")))] - pub fn default_native_tls() -> Result { + pub fn default_native_tls() -> Result { NativeTlsConnector::builder().try_into() } /// Create a default TLS connector with the `rustls` module with safe defaults and system certs via [rustls-native-certs](https://github.com/rustls/rustls-native-certs). #[cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "enable-rustls", feature = "enable-rustls-ring"))))] - pub fn default_rustls() -> Result { - let system_certs = rustls_native_certs::load_native_certs()?; + pub fn default_rustls() -> Result { + let mut system_certs = rustls_native_certs::load_native_certs(); + if !system_certs.errors.is_empty() { + return Err(Error::new( + ErrorKind::Tls, + format!("{:?}", system_certs.errors.pop().unwrap()), + )); + } + let mut cert_store = RootCertStore::empty(); - for system_cert in system_certs.into_iter() { + for system_cert in system_certs.certs.into_iter() { cert_store.add(system_cert)?; } @@ -177,13 +182,13 @@ impl TlsConnector { #[cfg(feature = "enable-native-tls")] #[cfg_attr(docsrs, doc(cfg(feature = "enable-native-tls")))] impl TryFrom for TlsConnector { - type Error = RedisError; + type Error = Error; fn try_from(builder: NativeTlsConnectorBuilder) -> Result { let connector = builder .build() .map(TokioNativeTlsConnector::from) - .map_err(|e| RedisError::new(RedisErrorKind::Tls, format!("{:?}", e)))?; + .map_err(|e| Error::new(ErrorKind::Tls, format!("{:?}", e)))?; Ok(TlsConnector::Native(connector)) } } diff --git a/src/protocol/types.rs 
b/src/protocol/types.rs index 005b849f..53c2ffe0 100644 --- a/src/protocol/types.rs +++ b/src/protocol/types.rs @@ -1,11 +1,16 @@ use super::utils as protocol_utils; use crate::{ - error::{RedisError, RedisErrorKind}, - modules::inner::RedisClientInner, - prelude::RedisResult, + error::{Error, ErrorKind}, + modules::inner::ClientInner, + prelude::FredResult, protocol::{cluster, utils::server_to_parts}, - runtime::{RefCount, UnboundedSender}, - types::*, + runtime::{RefCount, Sender}, + types::{ + scan::{HScanResult, SScanResult, ScanResult, ZScanResult}, + Key, + Map, + Value, + }, utils, }; use async_trait::async_trait; @@ -21,6 +26,12 @@ use std::{ net::{SocketAddr, ToSocketAddrs}, }; +#[cfg(any( + feature = "enable-rustls", + feature = "enable-native-tls", + feature = "enable-rustls-ring" +))] +use crate::types::config::TlsHostMapping; #[cfg(any( feature = "enable-rustls", feature = "enable-native-tls", @@ -28,8 +39,8 @@ use std::{ ))] use std::{net::IpAddr, str::FromStr}; -/// Any kind of RESP frame. -#[derive(Debug)] +/// Any kind of owned RESP frame. +#[derive(Debug, Clone)] pub enum ProtocolFrame { Resp2(Resp2Frame), Resp3(Resp3Frame), @@ -38,7 +49,7 @@ pub enum ProtocolFrame { impl ProtocolFrame { /// Convert the frame to RESP3. 
pub fn into_resp3(self) -> Resp3Frame { - // the `RedisValue::convert` logic already accounts for different encodings of maps and sets, so + // the `Value::convert` logic already accounts for different encodings of maps and sets, so // we can just change everything to RESP3 above the protocol layer match self { ProtocolFrame::Resp2(frame) => frame.into_resp3(), @@ -208,18 +219,18 @@ impl From<&std::path::Path> for Server { } impl TryFrom for Server { - type Error = RedisError; + type Error = Error; fn try_from(value: String) -> Result { - Server::from_str(&value).ok_or(RedisError::new(RedisErrorKind::Config, "Invalid `host:port` server.")) + Server::from_str(&value).ok_or(Error::new(ErrorKind::Config, "Invalid `host:port` server.")) } } impl TryFrom<&str> for Server { - type Error = RedisError; + type Error = Error; fn try_from(value: &str) -> Result { - Server::from_str(value).ok_or(RedisError::new(RedisErrorKind::Config, "Invalid `host:port` server.")) + Server::from_str(value).ok_or(Error::new(ErrorKind::Config, "Invalid `host:port` server.")) } } @@ -325,7 +336,7 @@ pub struct Message { /// The channel on which the message was sent. pub channel: Str, /// The message contents. - pub value: RedisValue, + pub value: Value, /// The type of message subscription. pub kind: MessageKind, /// The server that sent the message. @@ -340,9 +351,9 @@ pub struct KeyScanInner { /// The index of the cursor in `args`. pub cursor_idx: usize, /// The arguments sent in each scan command. - pub args: Vec, + pub args: Vec, /// The sender half of the results channel. - pub tx: UnboundedSender>, + pub tx: Sender>, } pub struct KeyScanBufferedInner { @@ -353,9 +364,9 @@ pub struct KeyScanBufferedInner { /// The index of the cursor in `args`. pub cursor_idx: usize, /// The arguments sent in each scan command. - pub args: Vec, + pub args: Vec, /// The sender half of the results channel. 
- pub tx: UnboundedSender>, + pub tx: Sender>, } impl KeyScanInner { @@ -365,8 +376,8 @@ impl KeyScanInner { } /// Send an error on the response stream. - pub fn send_error(&self, error: RedisError) { - let _ = self.tx.send(Err(error)); + pub fn send_error(&self, error: Error) { + let _ = self.tx.try_send(Err(error)); } } @@ -377,8 +388,8 @@ impl KeyScanBufferedInner { } /// Send an error on the response stream. - pub fn send_error(&self, error: RedisError) { - let _ = self.tx.send(Err(error)); + pub fn send_error(&self, error: Error) { + let _ = self.tx.try_send(Err(error)); } } @@ -392,9 +403,9 @@ pub struct ValueScanInner { /// The index of the cursor argument in `args`. pub cursor_idx: usize, /// The arguments sent in each scan command. - pub args: Vec, + pub args: Vec, /// The sender half of the results channel. - pub tx: UnboundedSender>, + pub tx: Sender>, } impl ValueScanInner { @@ -404,17 +415,17 @@ impl ValueScanInner { } /// Send an error on the response stream. - pub fn send_error(&self, error: RedisError) { - let _ = self.tx.send(Err(error)); + pub fn send_error(&self, error: Error) { + let _ = self.tx.try_send(Err(error)); } - pub fn transform_hscan_result(mut data: Vec) -> Result { + pub fn transform_hscan_result(mut data: Vec) -> Result { if data.is_empty() { - return Ok(RedisMap::new()); + return Ok(Map::new()); } if data.len() % 2 != 0 { - return Err(RedisError::new( - RedisErrorKind::Protocol, + return Err(Error::new( + ErrorKind::Protocol, "Invalid HSCAN result. 
Expected array with an even number of elements.", )); } @@ -422,12 +433,12 @@ impl ValueScanInner { let mut out = HashMap::with_capacity(data.len() / 2); while data.len() >= 2 { let value = data.pop().unwrap(); - let key: RedisKey = match data.pop().unwrap() { - RedisValue::String(s) => s.into(), - RedisValue::Bytes(b) => b.into(), + let key: Key = match data.pop().unwrap() { + Value::String(s) => s.into(), + Value::Bytes(b) => b.into(), _ => { - return Err(RedisError::new( - RedisErrorKind::Protocol, + return Err(Error::new( + ErrorKind::Protocol, "Invalid HSCAN result. Expected string.", )) }, @@ -439,13 +450,13 @@ impl ValueScanInner { out.try_into() } - pub fn transform_zscan_result(mut data: Vec) -> Result, RedisError> { + pub fn transform_zscan_result(mut data: Vec) -> Result, Error> { if data.is_empty() { return Ok(Vec::new()); } if data.len() % 2 != 0 { - return Err(RedisError::new( - RedisErrorKind::Protocol, + return Err(Error::new( + ErrorKind::Protocol, "Invalid ZSCAN result. Expected array with an even number of elements.", )); } @@ -455,12 +466,12 @@ impl ValueScanInner { for chunk in data.chunks_exact_mut(2) { let value = chunk[0].take(); let score = match chunk[1].take() { - RedisValue::String(s) => utils::redis_string_to_f64(&s)?, - RedisValue::Integer(i) => i as f64, - RedisValue::Double(f) => f, + Value::String(s) => utils::string_to_f64(&s)?, + Value::Integer(i) => i as f64, + Value::Double(f) => f, _ => { - return Err(RedisError::new( - RedisErrorKind::Protocol, + return Err(Error::new( + ErrorKind::Protocol, "Invalid HSCAN result. Expected a string or number score.", )) }, @@ -505,7 +516,7 @@ impl ClusterRouting { /// Create a new routing table from the result of the `CLUSTER SLOTS` command. /// /// The `default_host` value refers to the server that provided the response. 
- pub fn from_cluster_slots>(value: RedisValue, default_host: S) -> Result { + pub fn from_cluster_slots>(value: Value, default_host: S) -> Result { let default_host = default_host.into(); let mut data = cluster::parse_cluster_slots(value, &default_host)?; data.sort_by(|a, b| a.start.cmp(&b.start)); @@ -538,10 +549,10 @@ impl ClusterRouting { /// Rebuild the cache in place with the output of a `CLUSTER SLOTS` command. pub(crate) fn rebuild( &mut self, - inner: &RefCount, - cluster_slots: RedisValue, + inner: &RefCount, + cluster_slots: Value, default_host: &Str, - ) -> Result<(), RedisError> { + ) -> Result<(), Error> { self.data = cluster::parse_cluster_slots(cluster_slots, default_host)?; self.data.sort_by(|a, b| a.start.cmp(&b.start)); @@ -643,13 +654,13 @@ impl DefaultResolver { #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] pub trait Resolve: 'static { /// Resolve a hostname. - async fn resolve(&self, host: Str, port: u16) -> RedisResult>; + async fn resolve(&self, host: Str, port: u16) -> FredResult>; } #[cfg(feature = "glommio")] #[async_trait(?Send)] impl Resolve for DefaultResolver { - async fn resolve(&self, host: Str, port: u16) -> RedisResult> { + async fn resolve(&self, host: Str, port: u16) -> FredResult> { let client_id = self.id.clone(); // glommio users should probably use a non-blocking impl such as hickory-dns @@ -658,8 +669,8 @@ impl Resolve for DefaultResolver { let ips: Vec = addr.to_socket_addrs()?.collect(); if ips.is_empty() { - Err(RedisError::new( - RedisErrorKind::IO, + Err(Error::new( + ErrorKind::IO, format!("Failed to resolve {}:{}", host, port), )) } else { @@ -679,13 +690,13 @@ impl Resolve for DefaultResolver { #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] pub trait Resolve: Send + Sync + 'static { /// Resolve a hostname. 
- async fn resolve(&self, host: Str, port: u16) -> RedisResult>; + async fn resolve(&self, host: Str, port: u16) -> FredResult>; } #[cfg(not(feature = "glommio"))] #[async_trait] impl Resolve for DefaultResolver { - async fn resolve(&self, host: Str, port: u16) -> RedisResult> { + async fn resolve(&self, host: Str, port: u16) -> FredResult> { let client_id = self.id.clone(); tokio::task::spawn_blocking(move || { @@ -693,8 +704,8 @@ impl Resolve for DefaultResolver { let ips: Vec = addr.to_socket_addrs()?.collect(); if ips.is_empty() { - Err(RedisError::new( - RedisErrorKind::IO, + Err(Error::new( + ErrorKind::IO, format!("Failed to resolve {}:{}", host, port), )) } else { diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 9538d48c..1bda0e57 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,9 +1,9 @@ use crate::{ - error::{RedisError, RedisErrorKind}, - modules::inner::RedisClientInner, + error::{Error, ErrorKind}, + modules::inner::ClientInner, protocol::{ - codec::RedisCodec, - command::{ClusterErrorKind, RedisCommand, RedisCommandKind}, + codec::Codec, + command::{ClusterErrorKind, Command, CommandKind}, connection::OK, types::{ProtocolFrame, *}, }, @@ -29,7 +29,7 @@ static LEGACY_AUTH_ERROR_BODY: &str = "ERR Client sent AUTH, but no password is static ACL_AUTH_ERROR_PREFIX: &str = "ERR AUTH called without any password configured for the default user"; -pub fn parse_cluster_error(data: &str) -> Result<(ClusterErrorKind, u16, String), RedisError> { +pub fn parse_cluster_error(data: &str) -> Result<(ClusterErrorKind, u16, String), Error> { let parts: Vec<&str> = data.split(' ').collect(); if parts.len() == 3 { let kind: ClusterErrorKind = parts[0].try_into()?; @@ -38,7 +38,7 @@ pub fn parse_cluster_error(data: &str) -> Result<(ClusterErrorKind, u16, String) Ok((kind, slot, server)) } else { - Err(RedisError::new(RedisErrorKind::Protocol, "Expected cluster error.")) + Err(Error::new(ErrorKind::Protocol, "Expected cluster error.")) } 
} @@ -56,10 +56,10 @@ pub fn is_ok(frame: &Resp3Frame) -> bool { } } -pub fn server_to_parts(server: &str) -> Result<(&str, u16), RedisError> { +pub fn server_to_parts(server: &str) -> Result<(&str, u16), Error> { let parts: Vec<&str> = server.split(':').collect(); if parts.len() < 2 { - return Err(RedisError::new(RedisErrorKind::IO, "Invalid server.")); + return Err(Error::new(ErrorKind::IO, "Invalid server.")); } Ok((parts[0], parts[1].parse::()?)) } @@ -93,30 +93,30 @@ pub fn binary_search(slots: &[SlotRange], slot: u16) -> Option { None } -pub fn pretty_error(resp: &str) -> RedisError { +pub fn pretty_error(resp: &str) -> Error { let kind = { let mut parts = resp.split_whitespace(); match parts.next().unwrap_or("") { - "" => RedisErrorKind::Unknown, + "" => ErrorKind::Unknown, "ERR" => { if resp.contains("instance has cluster support disabled") { // Cluster client connecting to non-cluster server. // Returning Config to signal no reconnect will help. - RedisErrorKind::Config + ErrorKind::Config } else { - RedisErrorKind::Unknown + ErrorKind::Unknown } }, - "WRONGTYPE" => RedisErrorKind::InvalidArgument, - "NOAUTH" | "WRONGPASS" => RedisErrorKind::Auth, - "MOVED" | "ASK" | "CLUSTERDOWN" => RedisErrorKind::Cluster, + "WRONGTYPE" => ErrorKind::InvalidArgument, + "NOAUTH" | "WRONGPASS" => ErrorKind::Auth, + "MOVED" | "ASK" | "CLUSTERDOWN" => ErrorKind::Cluster, "Invalid" => match parts.next().unwrap_or("") { - "argument(s)" | "Argument" => RedisErrorKind::InvalidArgument, - "command" | "Command" => RedisErrorKind::InvalidCommand, - _ => RedisErrorKind::Unknown, + "argument(s)" | "Argument" => ErrorKind::InvalidArgument, + "command" | "Command" => ErrorKind::InvalidCommand, + _ => ErrorKind::Unknown, }, - _ => RedisErrorKind::Unknown, + _ => ErrorKind::Unknown, } }; @@ -125,11 +125,11 @@ pub fn pretty_error(resp: &str) -> RedisError { } else { Cow::Owned(resp.to_owned()) }; - RedisError::new(kind, details) + Error::new(kind, details) } /// Parse the frame as a 
string, without support for error frames. -pub fn frame_into_string(frame: Resp3Frame) -> Result { +pub fn frame_into_string(frame: Resp3Frame) -> Result { match frame { Resp3Frame::SimpleString { data, .. } => Ok(String::from_utf8(data.to_vec())?), Resp3Frame::BlobString { data, .. } => Ok(String::from_utf8(data.to_vec())?), @@ -140,7 +140,7 @@ pub fn frame_into_string(frame: Resp3Frame) -> Result { Resp3Frame::BigNumber { data, .. } => Ok(String::from_utf8(data.to_vec())?), Resp3Frame::SimpleError { data, .. } => Err(pretty_error(&data)), Resp3Frame::BlobError { data, .. } => Err(pretty_error(str::from_utf8(&data)?)), - _ => Err(RedisError::new(RedisErrorKind::Protocol, "Expected string.")), + _ => Err(Error::new(ErrorKind::Protocol, "Expected string.")), } } @@ -156,7 +156,7 @@ pub fn parse_shard_pubsub_frame(server: &Server, frame: &Resp3Frame) -> Option channel, None => return None, }; @@ -185,11 +185,11 @@ pub fn parse_shard_pubsub_frame(server: &Server, frame: &Resp3Frame) -> Option Result { +pub fn parse_message_kind(frame: &Resp3Frame) -> Result { let frames = match frame { Resp3Frame::Array { ref data, .. } => data, Resp3Frame::Push { ref data, .. } => data, - _ => return Err(RedisError::new(RedisErrorKind::Protocol, "Invalid pubsub frame type.")), + _ => return Err(Error::new(ErrorKind::Protocol, "Invalid pubsub frame type.")), }; let parsed = if frames.len() == 3 { @@ -211,35 +211,31 @@ pub fn parse_message_kind(frame: &Resp3Frame) -> Result None }; - parsed.ok_or(RedisError::new( - RedisErrorKind::Protocol, - "Invalid pubsub message kind.", - )) + parsed.ok_or(Error::new(ErrorKind::Protocol, "Invalid pubsub message kind.")) } /// Parse the channel and value fields from a pubsub frame. -pub fn parse_message_fields(frame: Resp3Frame) -> Result<(Str, RedisValue), RedisError> { +pub fn parse_message_fields(frame: Resp3Frame) -> Result<(Str, Value), Error> { let mut frames = match frame { Resp3Frame::Array { data, .. } => data, Resp3Frame::Push { data, .. 
} => data, - _ => return Err(RedisError::new(RedisErrorKind::Protocol, "Invalid pubsub frame type.")), + _ => return Err(Error::new(ErrorKind::Protocol, "Invalid pubsub frame type.")), }; let value = frames .pop() - .ok_or(RedisError::new(RedisErrorKind::Protocol, "Invalid pubsub message."))?; + .ok_or(Error::new(ErrorKind::Protocol, "Invalid pubsub message."))?; let channel = frames .pop() - .ok_or(RedisError::new(RedisErrorKind::Protocol, "Invalid pubsub channel."))?; - let channel = - frame_to_str(&channel).ok_or(RedisError::new(RedisErrorKind::Protocol, "Failed to parse channel."))?; + .ok_or(Error::new(ErrorKind::Protocol, "Invalid pubsub channel."))?; + let channel = frame_to_str(channel).ok_or(Error::new(ErrorKind::Protocol, "Failed to parse channel."))?; let value = frame_to_results(value)?; Ok((channel, value)) } /// Parse the frame as a pubsub message. -pub fn frame_to_pubsub(server: &Server, frame: Resp3Frame) -> Result { +pub fn frame_to_pubsub(server: &Server, frame: Resp3Frame) -> Result { if let Some(message) = parse_shard_pubsub_frame(server, &frame) { return Ok(message); } @@ -255,7 +251,7 @@ pub fn frame_to_pubsub(server: &Server, frame: Resp3Frame) -> Result Resp2Frame { +pub fn check_resp2_auth_error(codec: &Codec, frame: Resp2Frame) -> Resp2Frame { let is_auth_error = match frame { Resp2Frame::Error(ref data) => *data == LEGACY_AUTH_ERROR_BODY || data.starts_with(ACL_AUTH_ERROR_PREFIX), _ => false, @@ -274,7 +270,7 @@ pub fn check_resp2_auth_error(codec: &RedisCodec, frame: Resp2Frame) -> Resp2Fra } } -pub fn check_resp3_auth_error(codec: &RedisCodec, frame: Resp3Frame) -> Resp3Frame { +pub fn check_resp3_auth_error(codec: &Codec, frame: Resp3Frame) -> Resp3Frame { let is_auth_error = match frame { Resp3Frame::SimpleError { ref data, .. 
} => { *data == LEGACY_AUTH_ERROR_BODY || data.starts_with(ACL_AUTH_ERROR_PREFIX) @@ -299,71 +295,71 @@ pub fn check_resp3_auth_error(codec: &RedisCodec, frame: Resp3Frame) -> Resp3Fra } /// Try to parse the data as a string, and failing that return a byte slice. -pub fn string_or_bytes(data: Bytes) -> RedisValue { +pub fn string_or_bytes(data: Bytes) -> Value { if let Ok(s) = Str::from_inner(data.clone()) { - RedisValue::String(s) + Value::String(s) } else { - RedisValue::Bytes(data) + Value::Bytes(data) } } -pub fn frame_to_bytes(frame: &Resp3Frame) -> Option { +pub fn frame_to_bytes(frame: Resp3Frame) -> Option { match frame { - Resp3Frame::BigNumber { data, .. } => Some(data.clone()), - Resp3Frame::VerbatimString { data, .. } => Some(data.clone()), - Resp3Frame::BlobString { data, .. } => Some(data.clone()), - Resp3Frame::SimpleString { data, .. } => Some(data.clone()), - Resp3Frame::BlobError { data, .. } => Some(data.clone()), - Resp3Frame::SimpleError { data, .. } => Some(data.inner().clone()), + Resp3Frame::BigNumber { data, .. } => Some(data), + Resp3Frame::VerbatimString { data, .. } => Some(data), + Resp3Frame::BlobString { data, .. } => Some(data), + Resp3Frame::SimpleString { data, .. } => Some(data), + Resp3Frame::BlobError { data, .. } => Some(data), + Resp3Frame::SimpleError { data, .. } => Some(data.into_inner()), _ => None, } } -pub fn frame_to_str(frame: &Resp3Frame) -> Option { +pub fn frame_to_str(frame: Resp3Frame) -> Option { match frame { - Resp3Frame::BigNumber { data, .. } => Str::from_inner(data.clone()).ok(), - Resp3Frame::VerbatimString { data, .. } => Str::from_inner(data.clone()).ok(), - Resp3Frame::BlobString { data, .. } => Str::from_inner(data.clone()).ok(), - Resp3Frame::SimpleString { data, .. } => Str::from_inner(data.clone()).ok(), - Resp3Frame::BlobError { data, .. } => Str::from_inner(data.clone()).ok(), - Resp3Frame::SimpleError { data, .. } => Some(data.clone()), + Resp3Frame::BigNumber { data, .. 
} => Str::from_inner(data).ok(), + Resp3Frame::VerbatimString { data, .. } => Str::from_inner(data).ok(), + Resp3Frame::BlobString { data, .. } => Str::from_inner(data).ok(), + Resp3Frame::SimpleString { data, .. } => Str::from_inner(data).ok(), + Resp3Frame::BlobError { data, .. } => Str::from_inner(data).ok(), + Resp3Frame::SimpleError { data, .. } => Some(data), _ => None, } } #[cfg(feature = "i-hashes")] -fn parse_nested_map(data: FrameMap) -> Result { +fn parse_nested_map(data: FrameMap) -> Result { let mut out = HashMap::with_capacity(data.len()); for (key, value) in data.into_iter() { - let key: RedisKey = frame_to_results(key)?.try_into()?; + let key: Key = frame_to_results(key)?.try_into()?; let value = frame_to_results(value)?; out.insert(key, value); } - Ok(RedisMap { inner: out }) + Ok(Map { inner: out }) } /// Convert `nil` responses to a generic `Timeout` error. #[cfg(any(feature = "i-lists", feature = "i-sorted-sets"))] -pub fn check_null_timeout(frame: &Resp3Frame) -> Result<(), RedisError> { +pub fn check_null_timeout(frame: &Resp3Frame) -> Result<(), Error> { if frame.kind() == FrameKind::Null { - Err(RedisError::new(RedisErrorKind::Timeout, "Request timed out.")) + Err(Error::new(ErrorKind::Timeout, "Request timed out.")) } else { Ok(()) } } /// Parse the protocol frame into a redis value, with support for arbitrarily nested arrays. -pub fn frame_to_results(frame: Resp3Frame) -> Result { +pub fn frame_to_results(frame: Resp3Frame) -> Result { let value = match frame { - Resp3Frame::Null => RedisValue::Null, + Resp3Frame::Null => Value::Null, Resp3Frame::SimpleString { data, .. } => { let value = string_or_bytes(data); if value.as_str().map(|s| s == QUEUED).unwrap_or(false) { - RedisValue::Queued + Value::Queued } else { value } @@ -379,41 +375,36 @@ pub fn frame_to_results(frame: Resp3Frame) -> Result { Resp3Frame::Double { data, .. } => data.into(), Resp3Frame::BigNumber { data, .. } => string_or_bytes(data), Resp3Frame::Boolean { data, .. 
} => data.into(), - Resp3Frame::Array { data, .. } | Resp3Frame::Push { data, .. } => RedisValue::Array( + Resp3Frame::Array { data, .. } | Resp3Frame::Push { data, .. } => Value::Array( data .into_iter() .map(frame_to_results) - .collect::, _>>()?, + .collect::, _>>()?, ), - Resp3Frame::Set { data, .. } => RedisValue::Array( + Resp3Frame::Set { data, .. } => Value::Array( data .into_iter() .map(frame_to_results) - .collect::, _>>()?, + .collect::, _>>()?, ), Resp3Frame::Map { data, .. } => { let mut out = HashMap::with_capacity(data.len()); for (key, value) in data.into_iter() { - let key: RedisKey = frame_to_results(key)?.try_into()?; + let key: Key = frame_to_results(key)?.try_into()?; let value = frame_to_results(value)?; out.insert(key, value); } - RedisValue::Map(RedisMap { inner: out }) - }, - _ => { - return Err(RedisError::new( - RedisErrorKind::Protocol, - "Invalid response frame type.", - )) + Value::Map(Map { inner: out }) }, + _ => return Err(Error::new(ErrorKind::Protocol, "Invalid response frame type.")), }; Ok(value) } -/// Flatten a single nested layer of arrays or sets into one array. +/// Flatten a single nested layer of arrays or sets into an array. #[cfg(feature = "i-hashes")] pub fn flatten_frame(frame: Resp3Frame) -> Resp3Frame { match frame { @@ -470,18 +461,15 @@ pub fn flatten_frame(frame: Resp3Frame) -> Resp3Frame { } #[cfg(feature = "i-hashes")] -/// Convert a frame to a nested RedisMap. -pub fn frame_to_map(frame: Resp3Frame) -> Result { +/// Convert a frame to a nested `Map`. +pub fn frame_to_map(frame: Resp3Frame) -> Result { match frame { Resp3Frame::Array { mut data, .. 
} => { if data.is_empty() { - return Ok(RedisMap::new()); + return Ok(Map::new()); } if data.len() % 2 != 0 { - return Err(RedisError::new( - RedisErrorKind::Protocol, - "Expected an even number of frames.", - )); + return Err(Error::new(ErrorKind::Protocol, "Expected an even number of frames.")); } let mut inner = HashMap::with_capacity(data.len() / 2); @@ -492,7 +480,7 @@ pub fn frame_to_map(frame: Resp3Frame) -> Result { inner.insert(key, value); } - Ok(RedisMap { inner }) + Ok(Map { inner }) }, Resp3Frame::Map { data, .. } => parse_nested_map(data), Resp3Frame::SimpleError { data, .. } => Err(pretty_error(&data)), @@ -500,15 +488,12 @@ pub fn frame_to_map(frame: Resp3Frame) -> Result { let parsed = String::from_utf8_lossy(&data); Err(pretty_error(&parsed)) }, - _ => Err(RedisError::new( - RedisErrorKind::Protocol, - "Expected array or map frames.", - )), + _ => Err(Error::new(ErrorKind::Protocol, "Expected array or map frames.")), } } /// Convert a frame to a `RedisError`. -pub fn frame_to_error(frame: &Resp3Frame) -> Option { +pub fn frame_to_error(frame: &Resp3Frame) -> Option { match frame { Resp3Frame::SimpleError { ref data, .. } => Some(pretty_error(data)), Resp3Frame::BlobError { ref data, .. 
} => { @@ -519,18 +504,19 @@ pub fn frame_to_error(frame: &Resp3Frame) -> Option { } } -pub fn value_to_outgoing_resp2_frame(value: &RedisValue) -> Result { +pub fn value_to_outgoing_resp2_frame(value: &Value) -> Result { let frame = match value { - RedisValue::Double(ref f) => Resp2Frame::BulkString(f.to_string().into()), - RedisValue::Boolean(ref b) => Resp2Frame::BulkString(b.to_string().into()), - RedisValue::Integer(ref i) => Resp2Frame::BulkString(i.to_string().into()), - RedisValue::String(ref s) => Resp2Frame::BulkString(s.inner().clone()), - RedisValue::Bytes(ref b) => Resp2Frame::BulkString(b.clone()), - RedisValue::Queued => Resp2Frame::BulkString(Bytes::from_static(QUEUED.as_bytes())), - RedisValue::Null => Resp2Frame::Null, + Value::Double(ref f) => Resp2Frame::BulkString(f.to_string().into()), + Value::Boolean(ref b) => Resp2Frame::BulkString(b.to_string().into()), + // the `int_as_bulkstring` flag in redis-protocol converts this to a bulk string + Value::Integer(ref i) => Resp2Frame::Integer(*i), + Value::String(ref s) => Resp2Frame::BulkString(s.inner().clone()), + Value::Bytes(ref b) => Resp2Frame::BulkString(b.clone()), + Value::Queued => Resp2Frame::BulkString(Bytes::from_static(QUEUED.as_bytes())), + Value::Null => Resp2Frame::Null, _ => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, + return Err(Error::new( + ErrorKind::InvalidArgument, format!("Invalid argument type: {}", value.kind()), )) }, @@ -539,36 +525,37 @@ pub fn value_to_outgoing_resp2_frame(value: &RedisValue) -> Result Result { +pub fn value_to_outgoing_resp3_frame(value: &Value) -> Result { let frame = match value { - RedisValue::Double(ref f) => Resp3Frame::BlobString { + Value::Double(ref f) => Resp3Frame::BlobString { data: f.to_string().into(), attributes: None, }, - RedisValue::Boolean(ref b) => Resp3Frame::BlobString { + Value::Boolean(ref b) => Resp3Frame::BlobString { data: b.to_string().into(), attributes: None, }, - RedisValue::Integer(ref i) => 
Resp3Frame::BlobString { - data: i.to_string().into(), + // the `int_as_blobstring` flag in redis-protocol converts this to a blob string + Value::Integer(ref i) => Resp3Frame::Number { + data: *i, attributes: None, }, - RedisValue::String(ref s) => Resp3Frame::BlobString { + Value::String(ref s) => Resp3Frame::BlobString { data: s.inner().clone(), attributes: None, }, - RedisValue::Bytes(ref b) => Resp3Frame::BlobString { + Value::Bytes(ref b) => Resp3Frame::BlobString { data: b.clone(), attributes: None, }, - RedisValue::Queued => Resp3Frame::BlobString { + Value::Queued => Resp3Frame::BlobString { data: Bytes::from_static(QUEUED.as_bytes()), attributes: None, }, - RedisValue::Null => Resp3Frame::Null, + Value::Null => Resp3Frame::Null, _ => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, + return Err(Error::new( + ErrorKind::InvalidArgument, format!("Invalid argument type: {}", value.kind()), )) }, @@ -578,13 +565,13 @@ pub fn value_to_outgoing_resp3_frame(value: &RedisValue) -> Result Resp3Frame { +pub fn mocked_value_to_frame(value: Value) -> Resp3Frame { match value { - RedisValue::Array(values) => Resp3Frame::Array { + Value::Array(values) => Resp3Frame::Array { data: values.into_iter().map(mocked_value_to_frame).collect(), attributes: None, }, - RedisValue::Map(values) => Resp3Frame::Map { + Value::Map(values) => Resp3Frame::Map { data: values .inner() .into_iter() @@ -592,48 +579,45 @@ pub fn mocked_value_to_frame(value: RedisValue) -> Resp3Frame { .collect(), attributes: None, }, - RedisValue::Null => Resp3Frame::Null, - RedisValue::Queued => Resp3Frame::SimpleString { + Value::Null => Resp3Frame::Null, + Value::Queued => Resp3Frame::SimpleString { data: Bytes::from_static(QUEUED.as_bytes()), attributes: None, }, - RedisValue::Bytes(value) => Resp3Frame::BlobString { + Value::Bytes(value) => Resp3Frame::BlobString { data: value, attributes: None, }, - RedisValue::Boolean(value) => Resp3Frame::Boolean { + Value::Boolean(value) => 
Resp3Frame::Boolean { data: value, attributes: None, }, - RedisValue::Integer(value) => Resp3Frame::Number { + Value::Integer(value) => Resp3Frame::Number { data: value, attributes: None, }, - RedisValue::Double(value) => Resp3Frame::Double { + Value::Double(value) => Resp3Frame::Double { data: value, attributes: None, }, - RedisValue::String(value) => Resp3Frame::BlobString { + Value::String(value) => Resp3Frame::BlobString { data: value.into_inner(), attributes: None, }, } } -pub fn expect_ok(value: &RedisValue) -> Result<(), RedisError> { +pub fn expect_ok(value: &Value) -> Result<(), Error> { match *value { - RedisValue::String(ref resp) => { + Value::String(ref resp) => { if resp.deref() == OK || resp.deref() == QUEUED { Ok(()) } else { - Err(RedisError::new( - RedisErrorKind::Unknown, - format!("Expected OK, found {}", resp), - )) + Err(Error::new(ErrorKind::Unknown, format!("Expected OK, found {}", resp))) } }, - _ => Err(RedisError::new( - RedisErrorKind::Unknown, + _ => Err(Error::new( + ErrorKind::Unknown, format!("Expected OK, found {:?}.", value), )), } @@ -641,12 +625,12 @@ pub fn expect_ok(value: &RedisValue) -> Result<(), RedisError> { /// Parse the replicas from the ROLE response returned from a master/primary node. 
#[cfg(feature = "replicas")] -pub fn parse_master_role_replicas(data: RedisValue) -> Result, RedisError> { - let mut role: Vec = data.convert()?; +pub fn parse_master_role_replicas(data: Value) -> Result, Error> { + let mut role: Vec = data.convert()?; if role.len() == 3 { if role[0].as_str().map(|s| s == "master").unwrap_or(false) { - let replicas: Vec = role[2].take().convert()?; + let replicas: Vec = role[2].take().convert()?; Ok( replicas @@ -669,47 +653,44 @@ pub fn parse_master_role_replicas(data: RedisValue) -> Result, Redis } #[cfg(feature = "i-geo")] -pub fn assert_array_len(data: &[T], len: usize) -> Result<(), RedisError> { +pub fn assert_array_len(data: &[T], len: usize) -> Result<(), Error> { if data.len() == len { Ok(()) } else { - Err(RedisError::new( - RedisErrorKind::Parse, - format!("Expected {} values.", len), - )) + Err(Error::new(ErrorKind::Parse, format!("Expected {} values.", len))) } } /// Flatten a nested array of values into one array. -pub fn flatten_redis_value(value: RedisValue) -> RedisValue { - if let RedisValue::Array(values) = value { +pub fn flatten_value(value: Value) -> Value { + if let Value::Array(values) = value { let mut out = Vec::with_capacity(values.len()); for value in values.into_iter() { - let flattened = flatten_redis_value(value); - if let RedisValue::Array(flattened) = flattened { + let flattened = flatten_value(value); + if let Value::Array(flattened) = flattened { out.extend(flattened); } else { out.push(flattened); } } - RedisValue::Array(out) + Value::Array(out) } else { value } } /// Convert a redis value to an array of (value, score) tuples. 
-pub fn value_to_zset_result(value: RedisValue) -> Result, RedisError> { - let value = flatten_redis_value(value); +pub fn value_to_zset_result(value: Value) -> Result, Error> { + let value = flatten_value(value); - if let RedisValue::Array(mut values) = value { + if let Value::Array(mut values) = value { if values.is_empty() { return Ok(Vec::new()); } if values.len() % 2 != 0 { - return Err(RedisError::new( - RedisErrorKind::Unknown, + return Err(Error::new( + ErrorKind::Unknown, "Expected an even number of redis values.", )); } @@ -719,8 +700,8 @@ pub fn value_to_zset_result(value: RedisValue) -> Result, let score = match values.pop().unwrap().as_f64() { Some(f) => f, None => { - return Err(RedisError::new( - RedisErrorKind::Protocol, + return Err(Error::new( + ErrorKind::Protocol, "Could not convert value to floating point number.", )) }, @@ -732,49 +713,46 @@ pub fn value_to_zset_result(value: RedisValue) -> Result, Ok(out) } else { - Err(RedisError::new( - RedisErrorKind::Unknown, - "Expected array of redis values.", - )) + Err(Error::new(ErrorKind::Unknown, "Expected array of redis values.")) } } #[cfg(any(feature = "blocking-encoding", feature = "partial-tracing", feature = "full-tracing"))] fn i64_size(i: i64) -> usize { if i < 0 { - 1 + redis_protocol::digits_in_number(-i as usize) + 1 + redis_protocol::digits_in_usize(-i as usize) } else { - redis_protocol::digits_in_number(i as usize) + redis_protocol::digits_in_usize(i as usize) } } #[cfg(any(feature = "blocking-encoding", feature = "partial-tracing", feature = "full-tracing"))] -pub fn arg_size(value: &RedisValue) -> usize { +pub fn arg_size(value: &Value) -> usize { match value { // use the RESP2 size - RedisValue::Boolean(_) => 5, + Value::Boolean(_) => 5, // TODO try digits_in_number(f.trunc()) + 1 + digits_in_number(f.fract()) // but don't forget the negative sign byte - RedisValue::Double(_) => 10, - RedisValue::Null => 3, - RedisValue::Integer(ref i) => i64_size(*i), - RedisValue::String(ref s) 
=> s.inner().len(), - RedisValue::Bytes(ref b) => b.len(), - RedisValue::Array(ref arr) => args_size(arr), - RedisValue::Map(ref map) => map + Value::Double(_) => 10, + Value::Null => 3, + Value::Integer(ref i) => i64_size(*i), + Value::String(ref s) => s.inner().len(), + Value::Bytes(ref b) => b.len(), + Value::Array(ref arr) => args_size(arr), + Value::Map(ref map) => map .inner .iter() .fold(0, |c, (k, v)| c + k.as_bytes().len() + arg_size(v)), - RedisValue::Queued => 0, + Value::Queued => 0, } } #[cfg(any(feature = "blocking-encoding", feature = "partial-tracing", feature = "full-tracing"))] -pub fn args_size(args: &[RedisValue]) -> usize { +pub fn args_size(args: &[Value]) -> usize { args.iter().fold(0, |c, arg| c + arg_size(arg)) } -fn serialize_hello(command: &RedisCommand, version: &RespVersion) -> Result { +fn serialize_hello(command: &Command, version: &RespVersion) -> Result { let args = command.args(); let (auth, setname) = if args.len() == 3 { @@ -782,8 +760,8 @@ fn serialize_hello(command: &RedisCommand, version: &RespVersion) -> Result username, None => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, + return Err(Error::new( + ErrorKind::InvalidArgument, "Invalid username. Expected string.", )); }, @@ -791,8 +769,8 @@ fn serialize_hello(command: &RedisCommand, version: &RespVersion) -> Result password, None => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, + return Err(Error::new( + ErrorKind::InvalidArgument, "Invalid password. Expected string.", )); }, @@ -800,8 +778,8 @@ fn serialize_hello(command: &RedisCommand, version: &RespVersion) -> Result val, None => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, + return Err(Error::new( + ErrorKind::InvalidArgument, "Invalid setname value. 
Expected string.", )); }, @@ -813,8 +791,8 @@ fn serialize_hello(command: &RedisCommand, version: &RespVersion) -> Result username, None => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, + return Err(Error::new( + ErrorKind::InvalidArgument, "Invalid username. Expected string.", )); }, @@ -822,8 +800,8 @@ fn serialize_hello(command: &RedisCommand, version: &RespVersion) -> Result password, None => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, + return Err(Error::new( + ErrorKind::InvalidArgument, "Invalid password. Expected string.", )); }, @@ -835,8 +813,8 @@ fn serialize_hello(command: &RedisCommand, version: &RespVersion) -> Result val, None => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, + return Err(Error::new( + ErrorKind::InvalidArgument, "Invalid setname value. Expected string.", )); }, @@ -847,21 +825,21 @@ fn serialize_hello(command: &RedisCommand, version: &RespVersion) -> Result Result { +// TODO find a way to optimize these functions to use borrowed frame types +pub fn command_to_resp3_frame(command: &Command) -> Result { let args = command.args(); match command.kind { - RedisCommandKind::_Custom(ref kind) => { + CommandKind::_Custom(ref kind) => { let parts: Vec<&str> = kind.cmd.trim().split(' ').collect(); let mut bulk_strings = Vec::with_capacity(parts.len() + args.len()); - for part in parts.into_iter() { bulk_strings.push(Resp3Frame::BlobString { data: part.as_bytes().to_vec().into(), @@ -872,12 +850,12 @@ pub fn command_to_resp3_frame(command: &RedisCommand) -> Result { + CommandKind::_HelloAllCluster(ref version) | CommandKind::_Hello(ref version) => { serialize_hello(command, version) }, _ => { @@ -898,19 +876,19 @@ pub fn command_to_resp3_frame(command: &RedisCommand) -> Result Result { +pub fn command_to_resp2_frame(command: &Command) -> Result { let args = command.args(); match command.kind { - RedisCommandKind::_Custom(ref kind) => { + CommandKind::_Custom(ref kind) => { let 
parts: Vec<&str> = kind.cmd.trim().split(' ').collect(); let mut bulk_strings = Vec::with_capacity(parts.len() + args.len()); @@ -921,7 +899,7 @@ pub fn command_to_resp2_frame(command: &RedisCommand) -> Result { let mut bulk_strings = Vec::with_capacity(args.len() + 2); @@ -934,21 +912,21 @@ pub fn command_to_resp2_frame(command: &RedisCommand) -> Result Result { +pub fn command_to_frame(command: &Command, is_resp3: bool) -> Result { if is_resp3 || command.kind.is_hello() { - command_to_resp3_frame(command).map(|c| c.into()) + command_to_resp3_frame(command) } else { - command_to_resp2_frame(command).map(|c| c.into()) + command_to_resp2_frame(command) } } -pub fn encode_frame(inner: &RefCount, command: &RedisCommand) -> Result { +pub fn encode_frame(inner: &RefCount, command: &Command) -> Result { #[cfg(all(feature = "blocking-encoding", not(feature = "glommio")))] return command.to_frame_blocking( inner.is_resp3(), @@ -959,7 +937,7 @@ pub fn encode_frame(inner: &RefCount, command: &RedisCommand) not(feature = "blocking-encoding"), all(feature = "blocking-encoding", feature = "glommio") ))] - return command.to_frame(inner.is_resp3()); + command.to_frame(inner.is_resp3()) } #[cfg(test)] @@ -967,6 +945,8 @@ mod tests { #![allow(dead_code)] #![allow(unused_imports)] use super::*; + #[cfg(feature = "i-cluster")] + use crate::types::cluster::{ClusterInfo, ClusterState}; use std::{collections::HashMap, time::Duration}; fn str_to_f(s: &str) -> Resp3Frame { @@ -1240,7 +1220,7 @@ mod tests { #[test] #[cfg(feature = "i-cluster")] fn should_parse_cluster_info() { - let input: RedisValue = "cluster_state:fail + let input: Value = "cluster_state:fail cluster_slots_assigned:16384 cluster_slots_ok:16384 cluster_slots_pfail:3 diff --git a/src/router/centralized.rs b/src/router/centralized.rs index 85dbd4ba..78b9424d 100644 --- a/src/router/centralized.rs +++ b/src/router/centralized.rs @@ -1,130 +1,37 @@ use crate::{ - error::RedisErrorKind, - modules::inner::RedisClientInner, - 
prelude::RedisError, + error::ErrorKind, + modules::inner::ClientInner, + prelude::Error, protocol::{ - command::{RedisCommand, RouterResponse}, + command::Command, connection, - connection::{Counters, RedisWriter, SharedBuffer, SplitStreamKind}, + connection::Connection, responders::{self, ResponseKind}, - types::Server, - utils as protocol_utils, }, - router::{responses, utils, Connections, Written}, - runtime::{spawn, JoinHandle, RefCount}, - types::ServerConfig, + router::Connections, + runtime::RefCount, + types::config::ServerConfig, }; use redis_protocol::resp3::types::{BytesFrame as Resp3Frame, Resp3Frame as _Resp3Frame}; use std::collections::VecDeque; -pub async fn write( - inner: &RefCount, - writer: &mut Option, - command: RedisCommand, - force_flush: bool, -) -> Written { - if let Some(writer) = writer.as_mut() { - utils::write_command(inner, writer, command, force_flush).await - } else { - _debug!(inner, "Failed to read connection for {}", command.kind.to_str_debug()); - Written::Disconnected(( - None, - Some(command), - RedisError::new(RedisErrorKind::IO, "Missing connection."), - )) - } -} - -/// Spawn a task to read response frames from the reader half of the socket. 
-#[allow(unused_assignments)] -pub fn spawn_reader_task( - inner: &RefCount, - mut reader: SplitStreamKind, - server: &Server, - buffer: &SharedBuffer, - counters: &Counters, - is_replica: bool, -) -> JoinHandle> { - let (inner, server) = (inner.clone(), server.clone()); - let (buffer, counters) = (buffer.clone(), counters.clone()); - #[cfg(feature = "glommio")] - let tq = inner.connection.connection_task_queue; - - let reader_ft = async move { - let mut last_error = None; - - loop { - let frame = match utils::next_frame(&inner, &mut reader, &server, &buffer).await { - Ok(Some(frame)) => frame.into_resp3(), - Ok(None) => { - last_error = None; - break; - }, - Err(error) => { - last_error = Some(error); - break; - }, - }; - - if let Some(error) = responses::check_special_errors(&inner, &frame) { - last_error = Some(error); - break; - } - if let Some(frame) = responses::check_pubsub_message(&inner, &server, frame) { - if let Err(e) = process_response_frame(&inner, &server, &buffer, &counters, frame).await { - _debug!(inner, "Error processing response frame from {}: {:?}", server, e); - last_error = Some(e); - break; - } - } - } - - // at this point the order of the shared buffer no longer matters since we can't know which commands actually made - // it to the server, just that the connection closed. the shared buffer will be drained when the writer notices - // that this task finished, but here we need to first filter out any commands that have exceeded their max write - // attempts. 
- utils::check_blocked_router(&inner, &buffer, &last_error); - utils::check_final_write_attempt(&inner, &buffer, &last_error); - if is_replica { - responses::broadcast_replica_error(&inner, &server, last_error); - } else { - responses::broadcast_reader_error(&inner, &server, last_error); - } - utils::remove_cached_connection_id(&inner, &server).await; - inner.remove_connection(&server); - - _debug!(inner, "Ending reader task from {}", server); - Ok(()) - }; - - #[cfg(feature = "glommio")] - if let Some(tq) = tq { - crate::runtime::spawn_into(reader_ft, tq) - } else { - spawn(reader_ft) - } - #[cfg(not(feature = "glommio"))] - spawn(reader_ft) -} - /// Process the response frame in the context of the last command. /// /// Errors returned here will be logged, but will not close the socket or initiate a reconnect. -pub async fn process_response_frame( - inner: &RefCount, - server: &Server, - buffer: &SharedBuffer, - counters: &Counters, +#[inline(always)] +pub fn process_response_frame( + inner: &RefCount, + conn: &mut Connection, frame: Resp3Frame, -) -> Result<(), RedisError> { - _trace!(inner, "Parsing response frame from {}", server); - let mut command = match buffer.pop() { +) -> Result<(), Error> { + _trace!(inner, "Parsing response frame from {}", conn.server); + let mut command = match conn.buffer.pop_front() { Some(command) => command, None => { _debug!( inner, "Missing last command from {}. Dropping {:?}.", - server, + conn.server, frame.kind() ); return Ok(()); @@ -136,38 +43,17 @@ pub async fn process_response_frame( command.kind.to_str_debug(), command.debug_id() ); - counters.decr_in_flight(); if command.blocks_connection() { - buffer.set_unblocked(); - } - responses::check_and_set_unblocked_flag(inner, &command).await; - - // non-pipelined transactions use ResponseKind::Skip, pipelined ones use a buffer. 
non-pipelined transactions - // need to retry commands in a special way so this logic forwards the result via the latest command's router - // response channel and exits early. pipelined transactions use the normal buffered response process below. - if command.in_non_pipelined_transaction() { - if let Some(error) = protocol_utils::frame_to_error(&frame) { - #[allow(unused_mut)] - if let Some(mut tx) = command.take_router_tx() { - let _ = tx.send(RouterResponse::TransactionError((error, command))); - } - return Ok(()); - } else if command.kind.ends_transaction() { - command.respond_to_router(inner, RouterResponse::TransactionResult(frame)); - return Ok(()); - } else { - command.respond_to_router(inner, RouterResponse::Continue); - return Ok(()); - } + conn.blocked = false; + inner.backchannel.set_unblocked(); } + #[cfg(feature = "partial-tracing")] + let _ = command.traces.network.take(); _trace!(inner, "Handling centralized response kind: {:?}", command.response); match command.take_response() { - ResponseKind::Skip | ResponseKind::Respond(None) => { - command.respond_to_router(inner, RouterResponse::Continue); - Ok(()) - }, - ResponseKind::Respond(Some(tx)) => responders::respond_to_caller(inner, server, command, tx, frame), + ResponseKind::Skip | ResponseKind::Respond(None) => Ok(()), + ResponseKind::Respond(Some(tx)) => responders::respond_to_caller(inner, &conn.server, command, tx, frame), ResponseKind::Buffer { received, expected, @@ -177,7 +63,7 @@ pub async fn process_response_frame( error_early, } => responders::respond_buffer( inner, - server, + &conn.server, command, received, expected, @@ -187,10 +73,10 @@ pub async fn process_response_frame( tx, frame, ), - ResponseKind::KeyScan(scanner) => responders::respond_key_scan(inner, server, command, scanner, frame), - ResponseKind::ValueScan(scanner) => responders::respond_value_scan(inner, server, command, scanner, frame), + ResponseKind::KeyScan(scanner) => responders::respond_key_scan(inner, &conn.server, 
command, scanner, frame), + ResponseKind::ValueScan(scanner) => responders::respond_value_scan(inner, &conn.server, command, scanner, frame), ResponseKind::KeyScanBuffered(scanner) => { - responders::respond_key_scan_buffered(inner, server, command, scanner, frame) + responders::respond_key_scan_buffered(inner, &conn.server, command, scanner, frame) }, } } @@ -198,33 +84,29 @@ pub async fn process_response_frame( /// Initialize fresh connections to the server, dropping any old connections and saving in-flight commands on /// `buffer`. pub async fn initialize_connection( - inner: &RefCount, + inner: &RefCount, connections: &mut Connections, - buffer: &mut VecDeque, -) -> Result<(), RedisError> { + buffer: &mut VecDeque, +) -> Result<(), Error> { _debug!(inner, "Initializing centralized connection."); - let commands = connections.disconnect_all(inner).await; - buffer.extend(commands); + buffer.extend(connections.disconnect_all(inner).await); match connections { - Connections::Centralized { writer, .. } => { + Connections::Centralized { connection: writer, .. 
} => { let server = match inner.config.server { ServerConfig::Centralized { ref server } => server.clone(), #[cfg(feature = "unix-sockets")] ServerConfig::Unix { ref path } => path.as_path().into(), - _ => return Err(RedisError::new(RedisErrorKind::Config, "Expected centralized config.")), + _ => return Err(Error::new(ErrorKind::Config, "Expected centralized config.")), }; let mut transport = connection::create(inner, &server, None).await?; transport.setup(inner, None).await?; - let (server, _writer) = connection::split(inner, transport, false, spawn_reader_task)?; + let connection = transport.into_pipelined(false); inner.notifications.broadcast_reconnect(server); - *writer = Some(_writer); + *writer = Some(connection); Ok(()) }, - _ => Err(RedisError::new( - RedisErrorKind::Config, - "Expected centralized connection.", - )), + _ => Err(Error::new(ErrorKind::Config, "Expected centralized connection.")), } } diff --git a/src/router/clustered.rs b/src/router/clustered.rs index f721e762..91ea34d6 100644 --- a/src/router/clustered.rs +++ b/src/router/clustered.rs @@ -1,199 +1,198 @@ use crate::{ - error::{RedisError, RedisErrorKind}, + error::{Error, ErrorKind}, interfaces, - modules::inner::RedisClientInner, + modules::inner::ClientInner, protocol::{ - command::{ClusterErrorKind, RedisCommand, RedisCommandKind, RouterCommand, RouterResponse}, - connection::{self, Counters, RedisTransport, RedisWriter, SharedBuffer, SplitStreamKind}, + command::{ClusterErrorKind, Command, CommandKind, RouterCommand}, + connection::{self, Connection, ExclusiveConnection}, responders, responders::ResponseKind, - types::{ClusterRouting, Server, SlotRange}, + types::{ClusterRouting, ProtocolFrame, Server, SlotRange}, utils as protocol_utils, }, - router::{responses, types::ClusterChange, utils, Connections, Written}, - runtime::{spawn, JoinHandle, Mutex, RefCount}, - types::{ClusterDiscoveryPolicy, ClusterStateChange}, + router::{types::ClusterChange, Connections, Router}, + 
runtime::{Mutex, RefCount}, + types::{config::ClusterDiscoveryPolicy, ClusterStateChange}, utils as client_utils, }; -use futures::future::try_join_all; +use futures::future::{join_all, try_join_all}; use redis_protocol::resp3::types::{BytesFrame as Resp3Frame, FrameKind, Resp3Frame as _Resp3Frame}; use std::{ - collections::{BTreeSet, HashMap, VecDeque}, - iter::repeat, + collections::{BTreeSet, HashMap, HashSet, VecDeque}, + ops::DerefMut, }; -/// Find the cluster node that should receive the command. -pub fn route_command<'a>( - inner: &RefCount, - state: &'a ClusterRouting, - command: &RedisCommand, -) -> Option<&'a Server> { - if let Some(ref server) = command.cluster_node { - // this `_server` has a lifetime tied to `command`, so we switch `server` to refer to the record in `state` while - // we check whether that node exists in the cluster. we return None here if the command specifies a server that - // does not exist in the cluster. - _trace!(inner, "Routing with custom cluster node: {}", server); - state.slots().iter().find_map(|slot| { - if slot.primary == *server { - Some(&slot.primary) - } else { - None +async fn write_all_nodes( + inner: &RefCount, + writers: &mut HashMap, + frame: &ProtocolFrame, +) -> Vec> { + let num_nodes = writers.len(); + let mut write_ft = Vec::with_capacity(num_nodes); + for (idx, (server, conn)) in writers.iter_mut().enumerate() { + let frame = frame.clone(); + write_ft.push(async move { + _debug!(inner, "Writing command to {} ({}/{})", server, idx + 1, num_nodes); + + if let Some(err) = conn.peek_reader_errors().await { + _debug!(inner, "Error sending command: {:?}", err); + return Err(err); } - }) - } else { - command - .cluster_hash() - .and_then(|slot| state.get_server(slot)) - .or_else(|| { - // for some commands we know they can go to any node, but for others it may depend on the arguments provided. 
- if command.args().is_empty() || command.kind.use_random_cluster_node() { - let node = state.random_node(); - _trace!( - inner, - "Using random cluster node `{:?}` for {}", - node, - command.kind.to_str_debug() - ); - node - } else { - None - } - }) - } -} -/// Write a command to the cluster according to the [cluster hashing](https://redis.io/docs/reference/cluster-spec/) interface. -pub async fn write( - inner: &RefCount, - writers: &mut HashMap, - state: &ClusterRouting, - command: RedisCommand, - force_flush: bool, -) -> Written { - let has_custom_server = command.cluster_node.is_some(); - let server = match route_command(inner, state, &command) { - Some(server) => server, - None => { - return if has_custom_server { - _debug!( - inner, - "Respond to caller with error from missing cluster node override ({:?})", - command.cluster_node - ); - command.finish( - inner, - Err(RedisError::new( - RedisErrorKind::Cluster, - "Missing cluster node override.", - )), - ); - - Written::Ignore + let server = if let Err(err) = conn.write(frame, true, false).await { + debug!("{}: Error sending frame to socket: {:?}", conn.server, err); + return Err(err); } else { - // these errors usually mean the cluster is partially down or misconfigured - _warn!( - inner, - "Possible cluster misconfiguration. 
Missing hash slot owner for {:?}.", - command.cluster_hash() - ); - Written::NotFound(command) + server.clone() }; - }, - }; + if let Err(err) = conn.flush().await { + debug!("{}: Error flushing socket: {:?}", conn.server, err); + Err(err) + } else { + Ok(server) + } + }); + } - if let Some(writer) = writers.get_mut(server) { - _debug!(inner, "Writing command `{}` to {}", command.kind.to_str_debug(), server); - utils::write_command(inner, writer, command, force_flush).await - } else { - // a reconnect message should already be queued from the reader task - _debug!( - inner, - "Failed to read connection {} for {}", - server, - command.kind.to_str_debug() - ); + join_all(write_ft).await +} + +/// Read the next non-pubsub frame from all connections concurrently. +async fn read_all_nodes( + inner: &RefCount, + writers: &mut HashMap, + filter: &HashSet, +) -> Vec, Error>> { + join_all(writers.iter_mut().map(|(server, conn)| async { + if filter.contains(server) { + match conn.read_skip_pubsub(inner).await? { + Some(frame) => Ok(Some((server.clone(), frame))), + None => Ok(None), + } + } else { + Ok(None) + } + })) + .await +} - Written::Disconnected(( - Some(server.clone()), - Some(command), - RedisError::new(RedisErrorKind::IO, "Missing connection."), - )) +/// Find the first error or buffer successful frames into an array. +fn parse_all_responses(results: &[Result, Error>]) -> Result { + let mut responses = Vec::with_capacity(results.len()); + for result in results.iter() { + match result { + Ok(Some((_, frame))) => { + if let Some(err) = protocol_utils::frame_to_error(frame) { + return Err(err); + } else { + responses.push(frame.clone()); + } + }, + Ok(None) => continue, + Err(err) => return Err(err.clone()), + } } + + Ok(Resp3Frame::Array { + data: responses, + attributes: None, + }) } /// Send a command to all cluster nodes. /// -/// Note: if any of the commands fail to send the entire command is interrupted. -// There's probably a much cleaner way to express this. 
Most of the complexity here comes from the need to -// pre-allocate and assign response locations in the buffer ahead of time. This is done to avoid any race conditions. +/// The caller must drain the in-flight buffers before calling this. pub async fn send_all_cluster_command( - inner: &RefCount, - writers: &mut HashMap, - mut command: RedisCommand, -) -> Result<(), RedisError> { - let num_nodes = writers.len(); - if let ResponseKind::Buffer { - ref mut frames, - ref mut expected, - .. - } = command.response - { - *expected = num_nodes; - - _trace!( - inner, - "Allocating {} null responses in buffer for {}.", - num_nodes, - command.kind.to_str_debug(), - ); - let mut guard = frames.lock(); - // pre-allocate responses - *guard = repeat(Resp3Frame::Null).take(num_nodes).collect(); - } - let mut responder = match command.response.duplicate() { - Some(resp) => resp, - None => { - return Err(RedisError::new( - RedisErrorKind::Config, - "Invalid command response type.", - )) - }, - }; - - for (idx, (server, writer)) in writers.iter_mut().enumerate() { - _debug!( - inner, - "Sending all cluster command to {} with index {}, ID: {}", - server, - idx, - command.debug_id() - ); - let mut cmd_responder = responder.duplicate().unwrap_or(ResponseKind::Skip); - cmd_responder.set_expected_index(idx); - let mut cmd = command.duplicate(cmd_responder); - cmd.skip_backpressure = true; + inner: &RefCount, + router: &mut Router, + mut command: Command, +) -> Result<(), Error> { + match router.connections { + Connections::Clustered { + connections: ref mut writers, + .. + } => { + let mut out = Ok(()); + let mut disconnect = Vec::new(); + // write to all the cluster nodes, keeping track of which ones failed, then try to read from the ones that + // succeeded. at the end disconnect from all the nodes that failed writes or reads and return the last error. 
+ let frame = protocol_utils::encode_frame(inner, &command)?; + let all_nodes: HashSet<_> = writers.keys().cloned().collect(); + + let results = write_all_nodes(inner, writers, &frame).await; + let write_success: HashSet<_> = results + .into_iter() + .filter_map(|r| match r { + Ok(server) => Some(server), + Err(e) => { + out = Err(e); + None + }, + }) + .collect(); + let write_failed: Vec<_> = { + all_nodes + .difference(&write_success) + .inspect(|server| { + disconnect.push((*server).clone()); + }) + .collect() + }; + _debug!(inner, "Failed sending command to {:?}", write_failed); + + // try to read from all nodes concurrently, keeping track of which ones failed + let results = read_all_nodes(inner, writers, &write_success).await; + command.respond_to_caller(parse_all_responses(&results)); + + let read_success: HashSet<_> = results + .into_iter() + .filter_map(|result| match result { + Ok(Some((server, _))) => Some(server), + Ok(None) => None, + Err(e) => { + out = Err(e); + None + }, + }) + .collect(); + let read_failed: Vec<_> = { + all_nodes + .difference(&read_success) + .inspect(|server| { + disconnect.push((*server).clone()); + }) + .collect() + }; + _debug!(inner, "Failed reading responses from {:?}", read_failed); + + // disconnect from all the connections that failed writing or reading + for server in disconnect.into_iter() { + let mut conn = match writers.remove(&server) { + Some(conn) => conn, + None => continue, + }; + + // the retry buffer is empty since the caller must drain the connection beforehand in this context + let result = client_utils::timeout( + async move { + let _ = conn.close().await; + Ok::<(), Error>(()) + }, + inner.connection.internal_command_timeout, + ) + .await; + if let Err(err) = result { + _warn!(inner, "Error disconnecting {:?}", err); + } + } - if let Written::Disconnected((server, _, err)) = utils::write_command(inner, writer, cmd, true).await { - _debug!( - inner, - "Exit all nodes command early ({}/{}: {:?}) from error: 
{:?}", - idx + 1, - num_nodes, - server, - err - ); - responder.respond_with_error(err); - break; - } + out + }, + _ => Err(Error::new(ErrorKind::Config, "Expected clustered config.")), } - - Ok(()) } -pub fn parse_cluster_changes( - cluster_state: &ClusterRouting, - writers: &HashMap, -) -> ClusterChange { +pub fn parse_cluster_changes(cluster_state: &ClusterRouting, writers: &HashMap) -> ClusterChange { let mut old_servers = BTreeSet::new(); let mut new_servers = BTreeSet::new(); for server in cluster_state.unique_primary_nodes().into_iter() { @@ -208,7 +207,7 @@ pub fn parse_cluster_changes( ClusterChange { add, remove } } -pub fn broadcast_cluster_change(inner: &RefCount, changes: &ClusterChange) { +pub fn broadcast_cluster_change(inner: &RefCount, changes: &ClusterChange) { let mut added: Vec = changes .add .iter() @@ -230,207 +229,68 @@ pub fn broadcast_cluster_change(inner: &RefCount, changes: &Cl inner.notifications.broadcast_cluster_change(changes); } -/// Spawn a task to read response frames from the reader half of the socket. 
-#[allow(unused_assignments)] -pub fn spawn_reader_task( - inner: &RefCount, - mut reader: SplitStreamKind, - server: &Server, - buffer: &SharedBuffer, - counters: &Counters, - is_replica: bool, -) -> JoinHandle> { - let (inner, server) = (inner.clone(), server.clone()); - let (buffer, counters) = (buffer.clone(), counters.clone()); - #[cfg(feature = "glommio")] - let tq = inner.connection.connection_task_queue; - - let reader_ft = async move { - let mut last_error = None; - - loop { - let frame = match utils::next_frame(&inner, &mut reader, &server, &buffer).await { - Ok(Some(frame)) => frame.into_resp3(), - Ok(None) => { - last_error = None; - break; - }, - Err(e) => { - last_error = Some(e); - break; - }, - }; - - if let Some(error) = responses::check_special_errors(&inner, &frame) { - last_error = Some(error); - break; - } - if let Some(frame) = responses::check_pubsub_message(&inner, &server, frame) { - if let Err(e) = process_response_frame(&inner, &server, &buffer, &counters, frame).await { - _debug!( - inner, - "Error processing clustered response frame from {}: {:?}", - server, - e - ); - last_error = Some(e); - break; - } - } - } - - // see the centralized variant of this function for more information. - utils::check_blocked_router(&inner, &buffer, &last_error); - utils::check_final_write_attempt(&inner, &buffer, &last_error); - if is_replica { - responses::broadcast_replica_error(&inner, &server, last_error); - } else { - responses::broadcast_reader_error(&inner, &server, last_error); - } - utils::remove_cached_connection_id(&inner, &server).await; - inner.remove_connection(&server); - - _debug!(inner, "Ending reader task from {}", server); - Ok(()) - }; - - #[cfg(feature = "glommio")] - if let Some(tq) = tq { - crate::runtime::spawn_into(reader_ft, tq) - } else { - spawn(reader_ft) - } - #[cfg(not(feature = "glommio"))] - spawn(reader_ft) -} - /// Parse a cluster redirection frame from the provided server, returning the new destination node info. 
pub fn parse_cluster_error_frame( - inner: &RefCount, + inner: &RefCount, frame: &Resp3Frame, server: &Server, -) -> Result<(ClusterErrorKind, u16, Server), RedisError> { +) -> Result<(ClusterErrorKind, u16, Server), Error> { let (kind, slot, server_str) = match frame.as_str() { Some(data) => protocol_utils::parse_cluster_error(data)?, - None => return Err(RedisError::new(RedisErrorKind::Protocol, "Invalid cluster error.")), + None => return Err(Error::new(ErrorKind::Protocol, "Invalid cluster error.")), }; let server = match Server::from_parts(&server_str, &server.host) { Some(server) => server, None => { _warn!(inner, "Invalid server field in cluster error: {}", server_str); - return Err(RedisError::new( - RedisErrorKind::Protocol, - "Invalid cluster redirection error.", - )); + return Err(Error::new(ErrorKind::Protocol, "Invalid cluster redirection error.")); }, }; Ok((kind, slot, server)) } -/// Send a MOVED or ASK command to the router, using the router channel if possible and falling back on the -/// command queue if appropriate. -// Cluster errors within a non-pipelined transaction can only be handled via the blocking router channel. -fn process_cluster_error( - inner: &RefCount, - server: &Server, - mut command: RedisCommand, - frame: Resp3Frame, -) { +/// Process a MOVED or ASK error, retrying commands via the command channel if needed. +/// +/// Errors returned here should end the router task. 
+pub fn redirect_command(inner: &RefCount, server: &Server, mut command: Command, frame: Resp3Frame) { // commands are not redirected to replica nodes command.use_replica = false; let (kind, slot, server) = match parse_cluster_error_frame(inner, &frame, server) { Ok(results) => results, Err(e) => { - command.respond_to_router(inner, RouterResponse::Continue); command.respond_to_caller(Err(e)); return; }, }; - #[allow(unused_mut)] - if let Some(mut tx) = command.take_router_tx() { - let response = match kind { - ClusterErrorKind::Ask => RouterResponse::Ask((slot, server, command)), - ClusterErrorKind::Moved => RouterResponse::Moved((slot, server, command)), - }; - - _debug!(inner, "Sending cluster error to router channel."); - if let Err(response) = tx.send(response) { - #[cfg(feature = "glommio")] - let response = response.into_inner(); - - // if it could not be sent on the router tx then send it on the command channel - let command = match response { - RouterResponse::Ask((slot, server, command)) => { - if command.transaction_id.is_some() { - _debug!( - inner, - "Failed sending ASK cluster error to router in transaction: {}", - command.kind.to_str_debug() - ); - // do not send the command to the command queue - return; - } else { - RouterCommand::Ask { slot, server, command } - } - }, - RouterResponse::Moved((slot, server, command)) => { - if command.transaction_id.is_some() { - _debug!( - inner, - "Failed sending MOVED cluster error to router in transaction: {}", - command.kind.to_str_debug() - ); - // do not send the command to the command queue - return; - } else { - RouterCommand::Moved { slot, server, command } - } - }, - _ => { - _error!(inner, "Invalid cluster error router response type."); - return; - }, - }; - - _debug!(inner, "Sending cluster error to command queue."); - if let Err(e) = interfaces::send_to_router(inner, command) { - _warn!(inner, "Cannot send MOVED to router channel: {:?}", e); - } - } - } else { - let command = match kind { - 
ClusterErrorKind::Ask => RouterCommand::Ask { slot, server, command }, - ClusterErrorKind::Moved => RouterCommand::Moved { slot, server, command }, - }; - - _debug!(inner, "Sending cluster error to command queue."); - if let Err(e) = interfaces::send_to_router(inner, command) { - _warn!(inner, "Cannot send ASKED to router channel: {:?}", e); - } + let command = match kind { + ClusterErrorKind::Ask => RouterCommand::Ask { slot, server, command }, + ClusterErrorKind::Moved => RouterCommand::Moved { slot, server, command }, + }; + _debug!(inner, "Sending cluster error to command queue."); + if let Err(e) = interfaces::send_to_router(inner, command) { + _warn!(inner, "Cannot send ASKED to router channel: {:?}", e); } } /// Process the response frame in the context of the last command. /// /// Errors returned here will be logged, but will not close the socket or initiate a reconnect. -pub async fn process_response_frame( - inner: &RefCount, - server: &Server, - buffer: &SharedBuffer, - counters: &Counters, +pub fn process_response_frame( + inner: &RefCount, + conn: &mut Connection, frame: Resp3Frame, -) -> Result<(), RedisError> { - _trace!(inner, "Parsing response frame from {}", server); - let mut command = match buffer.pop() { +) -> Result<(), Error> { + _trace!(inner, "Parsing response frame from {}", conn.server); + let mut command = match conn.buffer.pop_front() { Some(command) => command, None => { _debug!( inner, "Missing last command from {}. 
Dropping {:?}.", - server, + conn.server, frame.kind() ); return Ok(()); @@ -442,51 +302,22 @@ pub async fn process_response_frame( command.kind.to_str_debug(), command.debug_id() ); - counters.decr_in_flight(); if command.blocks_connection() { - buffer.set_unblocked(); + conn.blocked = false; + inner.backchannel.set_unblocked(); } - responses::check_and_set_unblocked_flag(inner, &command).await; + #[cfg(feature = "partial-tracing")] + let _ = command.traces.network.take(); - // pipelined transactions defer cluster redirections until after `EXECABORT` is received - if frame.is_redirection() && !command.in_pipelined_transaction() { - _debug!( - inner, - "Recv MOVED or ASK error for `{}` from {}: {:?}", - command.kind.to_str_debug(), - server, - frame.as_str() - ); - process_cluster_error(inner, server, command, frame); + if frame.is_redirection() { + redirect_command(inner, &conn.server, command, frame); return Ok(()); } - // non-pipelined transactions use ResponseKind::Skip, pipelined ones use a buffer. non-pipelined transactions - // need to retry commands in a special way so this logic forwards the result via the latest command's router - // response channel and exits early. pipelined transactions use the normal buffered response process below. 
- if command.in_non_pipelined_transaction() { - if let Some(error) = protocol_utils::frame_to_error(&frame) { - #[allow(unused_mut)] - if let Some(mut tx) = command.take_router_tx() { - let _ = tx.send(RouterResponse::TransactionError((error, command))); - } - return Ok(()); - } else if command.kind.ends_transaction() { - command.respond_to_router(inner, RouterResponse::TransactionResult(frame)); - return Ok(()); - } else { - command.respond_to_router(inner, RouterResponse::Continue); - return Ok(()); - } - } - _trace!(inner, "Handling clustered response kind: {:?}", command.response); match command.take_response() { - ResponseKind::Skip | ResponseKind::Respond(None) => { - command.respond_to_router(inner, RouterResponse::Continue); - Ok(()) - }, - ResponseKind::Respond(Some(tx)) => responders::respond_to_caller(inner, server, command, tx, frame), + ResponseKind::Skip | ResponseKind::Respond(None) => Ok(()), + ResponseKind::Respond(Some(tx)) => responders::respond_to_caller(inner, &conn.server, command, tx, frame), ResponseKind::Buffer { received, expected, @@ -496,7 +327,7 @@ pub async fn process_response_frame( error_early, } => responders::respond_buffer( inner, - server, + &conn.server, command, received, expected, @@ -506,19 +337,19 @@ pub async fn process_response_frame( tx, frame, ), - ResponseKind::KeyScan(scanner) => responders::respond_key_scan(inner, server, command, scanner, frame), - ResponseKind::ValueScan(scanner) => responders::respond_value_scan(inner, server, command, scanner, frame), + ResponseKind::KeyScan(scanner) => responders::respond_key_scan(inner, &conn.server, command, scanner, frame), + ResponseKind::ValueScan(scanner) => responders::respond_value_scan(inner, &conn.server, command, scanner, frame), ResponseKind::KeyScanBuffered(scanner) => { - responders::respond_key_scan_buffered(inner, server, command, scanner, frame) + responders::respond_key_scan_buffered(inner, &conn.server, command, scanner, frame) }, } } /// Try connecting to any 
node in the provided `RedisConfig` or `old_servers`. pub async fn connect_any( - inner: &RefCount, + inner: &RefCount, old_cache: Option<&[SlotRange]>, -) -> Result { +) -> Result { let mut all_servers: BTreeSet = if let Some(old_cache) = old_cache { old_cache.iter().map(|slot_range| slot_range.primary.clone()).collect() } else { @@ -552,10 +383,7 @@ pub async fn connect_any( return Ok(connection); } - Err(last_error.unwrap_or(RedisError::new( - RedisErrorKind::Cluster, - "Failed connecting to any cluster node.", - ))) + Err(last_error.unwrap_or(Error::new(ErrorKind::Cluster, "Failed connecting to any cluster node."))) } /// Run the `CLUSTER SLOTS` command on the backchannel, creating a new connection if needed. @@ -565,21 +393,21 @@ pub async fn connect_any( /// /// If this returns an error then all known cluster nodes are unreachable. pub async fn cluster_slots_backchannel( - inner: &RefCount, + inner: &RefCount, cache: Option<&ClusterRouting>, force_disconnect: bool, -) -> Result { +) -> Result { if force_disconnect { - inner.backchannel.write().await.check_and_disconnect(inner, None).await; + inner.backchannel.check_and_disconnect(inner, None).await; } let (response, host) = { - let command: RedisCommand = RedisCommandKind::ClusterSlots.into(); + let command: Command = CommandKind::ClusterSlots.into(); let backchannel_result = { // try to use the existing backchannel connection first - let mut backchannel = inner.backchannel.write().await; - if let Some(ref mut transport) = backchannel.transport { + let mut backchannel = inner.backchannel.transport.write().await; + if let Some(ref mut transport) = backchannel.deref_mut() { let default_host = transport.default_host.clone(); _trace!(inner, "Sending backchannel CLUSTER SLOTS to {}", transport.server); @@ -595,7 +423,7 @@ pub async fn cluster_slots_backchannel( } }; if backchannel_result.is_none() { - inner.backchannel.write().await.check_and_disconnect(inner, None).await; + 
inner.backchannel.check_and_disconnect(inner, None).await; } // failing the backchannel, try to connect to any of the user-provided hosts or the last known cluster nodes @@ -608,7 +436,7 @@ pub async fn cluster_slots_backchannel( cache.map(|cache| cache.slots()) }; - let command: RedisCommand = RedisCommandKind::ClusterSlots.into(); + let command: Command = CommandKind::ClusterSlots.into(); let (frame, host) = if let Some((frame, host)) = backchannel_result { let kind = frame.kind(); @@ -646,9 +474,9 @@ pub async fn cluster_slots_backchannel( }; _trace!(inner, "Recv CLUSTER SLOTS response: {:?}", response); if response.is_null() { - inner.backchannel.write().await.check_and_disconnect(inner, None).await; - return Err(RedisError::new( - RedisErrorKind::Protocol, + inner.backchannel.check_and_disconnect(inner, None).await; + return Err(Error::new( + ErrorKind::Protocol, "Invalid or missing CLUSTER SLOTS response.", )); } @@ -659,16 +487,16 @@ pub async fn cluster_slots_backchannel( Ok(new_cache) } -/// Check each connection and remove it from the writer map if it's not [working](RedisWriter::is_working). -pub async fn drop_broken_connections(writers: &mut HashMap) -> VecDeque { +/// Check each connection and remove it from the writer map if it's not working. +pub async fn drop_broken_connections(writers: &mut HashMap) -> VecDeque { let mut new_writers = HashMap::with_capacity(writers.len()); let mut buffer = VecDeque::new(); - for (server, writer) in writers.drain() { - if writer.is_working() { - new_writers.insert(server, writer); + for (server, mut writer) in writers.drain() { + if writer.peek_reader_errors().await.is_some() { + buffer.extend(writer.close().await); } else { - buffer.extend(writer.graceful_close().await); + new_writers.insert(server, writer); } } @@ -678,80 +506,76 @@ pub async fn drop_broken_connections(writers: &mut HashMap) /// Run `CLUSTER SLOTS`, update the cached routing table, and modify the connection map. 
pub async fn sync( - inner: &RefCount, - connections: &mut Connections, - buffer: &mut VecDeque, -) -> Result<(), RedisError> { + inner: &RefCount, + connections: &mut HashMap, + cache: &mut ClusterRouting, + buffer: &mut VecDeque, +) -> Result<(), Error> { _debug!(inner, "Synchronizing cluster state."); - if let Connections::Clustered { cache, writers } = connections { - // force disconnect after a connection unexpectedly closes or goes unresponsive - let force_disconnect = writers.is_empty() - || writers - .values() - .find_map(|t| if t.is_working() { None } else { Some(true) }) - .unwrap_or(false); - - let state = cluster_slots_backchannel(inner, Some(&*cache), force_disconnect).await?; - _debug!(inner, "Cluster routing state: {:?}", state.pretty()); - // update the cached routing table - inner - .server_state - .write() - .kind - .update_cluster_state(Some(state.clone())); - *cache = state.clone(); - - buffer.extend(drop_broken_connections(writers).await); - // detect changes to the cluster topology - let changes = parse_cluster_changes(&state, writers); - _debug!(inner, "Changing cluster connections: {:?}", changes); - broadcast_cluster_change(inner, &changes); - - // drop connections that are no longer used - for removed_server in changes.remove.into_iter() { - _debug!(inner, "Disconnecting from cluster node {}", removed_server); - let writer = match writers.remove(&removed_server) { - Some(writer) => writer, - None => continue, - }; - - let commands = writer.graceful_close().await; - buffer.extend(commands); - } + // force disconnect if connections is empty or any readers have pending errors + let force_disconnect = connections.is_empty() + || join_all(connections.values_mut().map(|c| c.peek_reader_errors())) + .await + .into_iter() + .filter(|err| err.is_some()) + .collect::>() + .is_empty(); + + let state = cluster_slots_backchannel(inner, Some(&*cache), force_disconnect).await?; + _debug!(inner, "Cluster routing state: {:?}", state.pretty()); + // update 
the cached routing table + inner + .server_state + .write() + .kind + .update_cluster_state(Some(state.clone())); + *cache = state.clone(); + + buffer.extend(drop_broken_connections(connections).await); + // detect changes to the cluster topology + let changes = parse_cluster_changes(&state, connections); + _debug!(inner, "Changing cluster connections: {:?}", changes); + broadcast_cluster_change(inner, &changes); + + // drop connections that are no longer used + for removed_server in changes.remove.into_iter() { + _debug!(inner, "Disconnecting from cluster node {}", removed_server); + let mut writer = match connections.remove(&removed_server) { + Some(writer) => writer, + None => continue, + }; - let mut connections_ft = Vec::with_capacity(changes.add.len()); - let new_writers = RefCount::new(Mutex::new(HashMap::with_capacity(changes.add.len()))); - // connect to each of the new nodes - for server in changes.add.into_iter() { - let _inner = inner.clone(); - let _new_writers = new_writers.clone(); - connections_ft.push(async move { - _debug!(inner, "Connecting to cluster node {}", server); - let mut transport = connection::create(&_inner, &server, None).await?; - transport.setup(&_inner, None).await?; - - let (server, writer) = connection::split(&_inner, transport, false, spawn_reader_task)?; - inner.notifications.broadcast_reconnect(server.clone()); - _new_writers.lock().insert(server, writer); - Ok::<_, RedisError>(()) - }); - } + let commands = writer.close().await; + buffer.extend(commands); + } - let _ = try_join_all(connections_ft).await?; - for (server, writer) in new_writers.lock().drain() { - writers.insert(server, writer); - } + let mut connections_ft = Vec::with_capacity(changes.add.len()); + let new_writers = RefCount::new(Mutex::new(HashMap::with_capacity(changes.add.len()))); + // connect to each of the new nodes concurrently + for server in changes.add.into_iter() { + let _inner = inner.clone(); + let _new_writers = new_writers.clone(); + 
connections_ft.push(async move { + _debug!(inner, "Connecting to cluster node {}", server); + let mut transport = connection::create(&_inner, &server, None).await?; + transport.setup(&_inner, None).await?; + let connection = transport.into_pipelined(false); + inner.notifications.broadcast_reconnect(server.clone()); + _new_writers.lock().insert(server, connection); + Ok::<_, Error>(()) + }); + } - _debug!(inner, "Finish synchronizing cluster connections."); - } else { - return Err(RedisError::new( - RedisErrorKind::Config, - "Expected clustered connections.", - )); + let _ = try_join_all(connections_ft).await?; + let mut server_version = None; + for (server, writer) in new_writers.lock().drain() { + server_version = writer.version.clone(); + connections.insert(server, writer); } - if let Some(version) = connections.server_version() { + _debug!(inner, "Finish synchronizing cluster connections."); + if let Some(version) = server_version { inner.server_state.write().kind.set_server_version(version); } Ok(()) @@ -760,12 +584,15 @@ pub async fn sync( /// Initialize fresh connections to the server, dropping any old connections and saving in-flight commands on /// `buffer`. 
pub async fn initialize_connections( - inner: &RefCount, + inner: &RefCount, connections: &mut Connections, - buffer: &mut VecDeque, -) -> Result<(), RedisError> { - let commands = connections.disconnect_all(inner).await; - _trace!(inner, "Adding {} commands to retry buffer.", commands.len()); - buffer.extend(commands); - sync(inner, connections, buffer).await + buffer: &mut VecDeque, +) -> Result<(), Error> { + match connections { + Connections::Clustered { + connections: ref mut writers, + ref mut cache, + } => sync(inner, writers, cache, buffer).await, + _ => Err(Error::new(ErrorKind::Config, "Expected clustered config.")), + } } diff --git a/src/router/commands.rs b/src/router/commands.rs index 91c06019..7c41f8f4 100644 --- a/src/router/commands.rs +++ b/src/router/commands.rs @@ -1,356 +1,255 @@ use crate::{ - error::{RedisError, RedisErrorKind}, - modules::inner::{CommandReceiver, RedisClientInner}, - protocol::command::{ - RedisCommand, - RedisCommandKind, - ResponseSender, - RouterCommand, - RouterReceiver, - RouterResponse, + error::{Error, ErrorKind}, + modules::inner::{ClientInner, CommandReceiver}, + protocol::command::{Command, CommandKind, ResponseSender, RouterCommand}, + router::{clustered, utils, Router}, + runtime::{sleep, OneshotSender, RefCount}, + types::{ + config::{Blocking, Server}, + ClientState, + ClientUnblockFlag, + ClusterHash, }, - router::{utils, Backpressure, Router, Written}, - runtime::{OneshotSender, RefCount}, - types::{Blocking, ClientState, ClientUnblockFlag, ClusterHash, Server}, utils as client_utils, }; use redis_protocol::resp3::types::BytesFrame as Resp3Frame; +use tokio::pin; +#[cfg(feature = "replicas")] +use crate::interfaces; #[cfg(feature = "transactions")] use crate::router::transactions; #[cfg(feature = "full-tracing")] use tracing_futures::Instrument; -/// Wait for the response from the reader task, handling cluster redirections if needed. 
-/// -/// The command is returned if it failed to write but could be immediately retried. -/// -/// Errors from this function should end the connection task. -async fn handle_router_response( - inner: &RefCount, +#[cfg(feature = "replicas")] +async fn create_replica_connection( + inner: &RefCount, router: &mut Router, - rx: Option, -) -> Result, RedisError> { - if let Some(rx) = rx { - _debug!(inner, "Waiting on router channel."); - let response = match rx.await { - Ok(response) => response, - Err(e) => { - _warn!(inner, "Dropped router response channel with error: {:?}", e); - return Ok(None); - }, - }; - - _debug!(inner, "Recv router response."); - match response { - RouterResponse::Continue => Ok(None), - RouterResponse::Ask((slot, server, mut command)) => { - if let Err(e) = command.decr_check_redirections() { - command.respond_to_caller(Err(e)); - Ok(None) - } else { - utils::send_asking_with_policy(inner, router, &server, slot).await?; - command.hasher = ClusterHash::Custom(slot); - command.use_replica = false; + mut command: Command, +) -> Result<(), Error> { + if command.use_replica && inner.connection.replica.lazy_connections { + let (primary, replica) = match utils::route_replica(router, &command) { + Ok((primary, replica)) => (primary, replica), + Err(err) => { + if inner.connection.replica.primary_fallback { command.attempts_remaining += 1; - Ok(Some(command)) - } - }, - RouterResponse::Moved((slot, server, mut command)) => { - // check if slot belongs to server, if not then run sync cluster - if !router.cluster_node_owns_slot(slot, &server) { - utils::sync_cluster_with_policy(inner, router).await?; - } - - if let Err(e) = command.decr_check_redirections() { - command.finish(inner, Err(e)); - Ok(None) - } else { - command.hasher = ClusterHash::Custom(slot); command.use_replica = false; - command.attempts_remaining += 1; - Ok(Some(command)) - } - }, - RouterResponse::ConnectionClosed((error, command)) => { - let command = if 
command.should_finish_with_error(inner) { - command.finish(inner, Err(error.clone())); - None + interfaces::send_to_router(inner, command.into())?; } else { - Some(command) - }; + command.respond_to_caller(Err(err)); + } - utils::reconnect_with_policy(inner, router).await?; - Ok(command) - }, - RouterResponse::TransactionError(_) | RouterResponse::TransactionResult(_) => { - _error!(inner, "Unexpected transaction response. This is a bug."); - Err(RedisError::new( - RedisErrorKind::Unknown, - "Invalid transaction response.", - )) + return Ok(()); }, + }; + + if let Err(err) = utils::add_replica_with_policy(inner, router, &primary, &replica).await { + if inner.connection.replica.ignore_reconnection_errors { + _warn!( + inner, + "Failed to connect to replica, ignoring and trying with primary node: {}", + err + ); + + command.attempts_remaining += 1; + command.use_replica = false; + interfaces::send_to_router(inner, command.into()) + } else { + command.respond_to_caller(Err(err.clone())); + Err(err) + } + } else { + // connected successfully + command.attempts_remaining += 1; + interfaces::send_to_router(inner, command.into()) } + } else if command.use_replica && !inner.connection.replica.lazy_connections { + // connection does not exist and the client is not configured to create more + if inner.connection.replica.primary_fallback { + command.attempts_remaining += 1; + command.use_replica = false; + interfaces::send_to_router(inner, command.into())?; + } else { + command.respond_to_caller(Err(Error::new( + ErrorKind::Routing, + "Failed to route command to replica.", + ))); + } + Ok(()) } else { - Ok(None) + // connection does not exist + _debug!(inner, "Failed to route command to replica. 
Deferring reconnection..."); + let err = Error::new(ErrorKind::Routing, "Failed to route command."); + command.attempts_remaining += 1; + finish_or_retry_command(router, command, &err); + utils::defer_reconnection(inner, router, None, err, false)?; + Ok(()) } } -/// Continuously write the command until it is sent, queued to try later, or fails with a fatal error. -async fn write_with_backpressure( - inner: &RefCount, - router: &mut Router, - command: RedisCommand, - force_pipeline: bool, -) -> Result<(), RedisError> { - Box::pin(async { - _trace!(inner, "Writing command: {:?}", command); - - let mut _command: Option = Some(command); - let mut _backpressure: Option = None; - loop { - let mut command = match _command.take() { - Some(command) => command, - None => return Err(RedisError::new(RedisErrorKind::Unknown, "Missing command.")), - }; - if let Err(e) = command.decr_check_attempted() { - command.finish(inner, Err(e)); - break; - } +fn finish_or_retry_command(router: &mut Router, mut command: Command, error: &Error) { + if command.attempts_remaining == 0 { + command.respond_to_caller(Err(error.clone())); + } else { + router.retry_command(command); + } +} - // apply backpressure first if needed. as a part of that check we may decide to block on the next command. - let router_rx = match _backpressure { - Some(backpressure) => match backpressure.wait(inner, &mut command).await { - Ok(Some(rx)) => Some(rx), - Ok(None) => { - if command.should_auto_pipeline(inner, force_pipeline) { - None - } else { - Some(command.create_router_channel()) - } - }, - Err(e) => { - command.respond_to_caller(Err(e)); - return Ok(()); - }, - }, - None => { - if command.should_auto_pipeline(inner, force_pipeline) { - None - } else { - Some(command.create_router_channel()) - } - }, - }; - let closes_connection = command.kind.closes_connection(); - let is_blocking = command.blocks_connection(); - let use_replica = command.use_replica; +/// Write the command to a connection. 
+pub async fn write_command( + inner: &RefCount, + router: &mut Router, + mut command: Command, +) -> Result<(), Error> { + _trace!(inner, "Writing command: {:?} ({})", command, command.debug_id()); + if let Err(err) = command.decr_check_attempted() { + command.respond_to_caller(Err(err)); + return Ok(()); + } + let closes_connection = command.kind.closes_connection(); + let is_blocking = command.blocks_connection(); + #[cfg(feature = "replicas")] + let use_replica = command.use_replica; + #[cfg(not(feature = "replicas"))] + let use_replica = false; - let result = if use_replica { - router.write_replica(command, false).await + if closes_connection { + router.drain_all(inner).await?; + } + // TODO refactor this + let (flush, disconnect_from) = { + if command.is_all_cluster_nodes() { + if let Err(err) = router.drain_all(inner).await { + router.disconnect_all(inner).await; + finish_or_retry_command(router, command, &err); + utils::defer_reconnection(inner, router, None, err, use_replica)?; + (false, None) } else { - router.write(command, false).await - }; - - match result { - Written::Backpressure((mut command, backpressure)) => { - _debug!(inner, "Recv backpressure again for {}.", command.kind.to_str_debug()); - // backpressure doesn't count as a write attempt - command.attempts_remaining += 1; - _command = Some(command); - _backpressure = Some(backpressure); - - continue; - }, - Written::Disconnected((server, command, error)) => { - _debug!(inner, "Handle disconnect for {:?} due to {:?}", server, error); - let commands = router.connections.disconnect(inner, server.as_ref()).await; - router.buffer_commands(commands); - if let Some(command) = command { - if command.should_finish_with_error(inner) { - command.finish(inner, Err(error)); - } else { - router.buffer_command(command); - } - } - - utils::defer_reconnect(inner); - break; - }, - Written::NotFound(mut command) => { - if let Err(e) = command.decr_check_redirections() { - command.finish(inner, Err(e)); - 
utils::defer_reconnect(inner); - break; - } + if let Err(err) = clustered::send_all_cluster_command(inner, router, command).await { + router.disconnect_all(inner).await; + utils::defer_reconnection(inner, router, None, err, use_replica)?; + } - _debug!(inner, "Perform cluster sync after missing hash slot lookup."); - if let Err(error) = router.sync_cluster().await { - // try to sync the cluster once, and failing that buffer the command. - _warn!(inner, "Failed to sync cluster after NotFound: {:?}", error); - utils::defer_reconnect(inner); - router.buffer_command(command); - utils::delay_cluster_sync(inner).await?; - break; - } else { - _command = Some(command); - _backpressure = None; - continue; + (false, None) + } + } else { + let conn = match router.route(&command) { + Some(conn) => conn, + None => { + #[cfg(feature = "replicas")] + return Box::pin(create_replica_connection(inner, router, command)).await; + + #[cfg(not(feature = "replicas"))] + { + let err = Error::new(ErrorKind::Unknown, "Failed to route command."); + finish_or_retry_command(router, command, &err); + utils::defer_reconnection(inner, router, None, err, use_replica)?; + return Ok(()); } }, - Written::Ignore => { - _trace!(inner, "Ignore `Written` response."); - break; - }, - Written::SentAll => { - _trace!(inner, "Sent command to all servers."); - let _ = router.check_and_flush().await; - if let Some(command) = handle_router_response(inner, router, router_rx).await? { - // commands that are sent to all nodes are not retried after a connection closing - _warn!(inner, "Responding with canceled error after all nodes command failure."); - command.finish(inner, Err(RedisError::new_canceled())); - break; - } else { - if closes_connection { - _trace!(inner, "Ending command loop after QUIT or SHUTDOWN."); - return Err(RedisError::new_canceled()); - } + }; - break; - } - }, - Written::Sent((server, flushed)) => { - _trace!(inner, "Sent command to {}. 
Flushed: {}", server, flushed); + match utils::write_command(inner, conn, command, false).await { + Ok(flushed) => { + _trace!(inner, "Sent command to {}. Flushed: {}", conn.server, flushed); if is_blocking { - inner.backchannel.write().await.set_blocked(&server); + inner.backchannel.set_blocked(&conn.server); } - if !flushed { - let _ = router.check_and_flush().await; + if !flushed && inner.counters.read_cmd_buffer_len() == 0 { + let _ = conn.flush().await; } + // interrupt the command that was just sent if another command is queued after sending this one let should_interrupt = is_blocking && inner.counters.read_cmd_buffer_len() > 0 && inner.config.blocking == Blocking::Interrupt; if should_interrupt { - // if there's other commands in the queue then interrupt the command that was just sent + let _ = conn.flush().await; _debug!(inner, "Interrupt after write."); if let Err(e) = client_utils::interrupt_blocked_connection(inner, ClientUnblockFlag::Error).await { _warn!(inner, "Failed to unblock connection: {:?}", e); } } - if let Some(command) = handle_router_response(inner, router, router_rx).await? 
{ - _command = Some(command); - _backpressure = None; - continue; + if closes_connection { + _trace!(inner, "Ending command loop after QUIT or SHUTDOWN."); + return Err(Error::new_canceled()); } else { - if closes_connection { - _trace!(inner, "Ending command loop after QUIT or SHUTDOWN."); - return Err(RedisError::new_canceled()); - } - - break; + (flushed, None) } }, - Written::Error((error, command)) => { - _debug!(inner, "Fatal error writing command: {:?}", error); - if let Some(command) = command { - command.finish(inner, Err(error.clone())); - } - inner.notifications.broadcast_error(error.clone()); - - utils::defer_reconnect(inner); - return Err(error); - }, - #[cfg(feature = "replicas")] - Written::Fallback(command) => { - _error!( - inner, - "Unexpected replica response to {} ({})", - command.kind.to_str_debug(), - command.debug_id() - ); - command.finish( - inner, - Err(RedisError::new(RedisErrorKind::Replica, "Unexpected replica response.")), - ); - break; - }, + Err((err, command)) => (false, Some((conn.server.clone(), err, command))), } } + }; + if flush { + if let Err(err) = router.flush().await { + _debug!(inner, "Failed to flush connections: {:?}", err); + } + } + if let Some((server, err, command)) = disconnect_from { + if let Some(command) = command { + finish_or_retry_command(router, command, &err); + } + utils::drop_connection(inner, router, &server, &err).await; + utils::defer_reconnection(inner, router, None, err, use_replica) + } else { Ok(()) - }) - .await + } } #[cfg(feature = "full-tracing")] -async fn write_with_backpressure_t( - inner: &RefCount, - router: &mut Router, - mut command: RedisCommand, - force_pipeline: bool, -) -> Result<(), RedisError> { - if inner.should_trace() { - command.take_queued_span(); - let span = fspan!(command, inner.full_tracing_span_level(), "fred.write"); - write_with_backpressure(inner, router, command, force_pipeline) - .instrument(span) - .await - } else { - write_with_backpressure(inner, router, command, 
force_pipeline).await - } +macro_rules! write_command_t { + ($inner:ident, $router:ident, $command:ident) => { + if $inner.should_trace() { + $command.take_queued_span(); + let span = fspan!($command, $inner.full_tracing_span_level(), "fred.write"); + Box::pin(write_command($inner, $router, $command)) + .instrument(span) + .await + } else { + Box::pin(write_command($inner, $router, $command)).await + } + }; } #[cfg(not(feature = "full-tracing"))] -async fn write_with_backpressure_t( - inner: &RefCount, - router: &mut Router, - command: RedisCommand, - force_pipeline: bool, -) -> Result<(), RedisError> { - write_with_backpressure(inner, router, command, force_pipeline).await +macro_rules! write_command_t { + ($inner:ident, $router:ident, $command:ident) => { + Box::pin(write_command($inner, $router, $command)).await + }; } /// Run a pipelined series of commands, queueing commands to run later if needed. async fn process_pipeline( - inner: &RefCount, + inner: &RefCount, router: &mut Router, - commands: Vec, -) -> Result<(), RedisError> { + commands: Vec, +) -> Result<(), Error> { _debug!(inner, "Writing pipeline with {} commands", commands.len()); for mut command in commands.into_iter() { // trying to pipeline `SSUBSCRIBE` is problematic since successful responses arrive out-of-order via pubsub push // frames, but error redirections are returned in-order and the client is expected to follow them. this makes it - // very difficult to accurately associate redirections with `ssubscribe` calls within a pipeline. to avoid this we + // difficult to accurately associate redirections with `ssubscribe` calls within a pipeline. to avoid this we // never pipeline `ssubscribe`, even if the caller asks. 
- let force_pipeline = if command.kind == RedisCommandKind::Ssubscribe { - command.can_pipeline = false; - false - } else { - command.can_pipeline = true; - !command.is_all_cluster_nodes() - }; - command.skip_backpressure = true; + command.can_pipeline = command.kind != CommandKind::Ssubscribe; - if let Err(e) = write_with_backpressure_t(inner, router, command, force_pipeline).await { - // if the command cannot be written it will be queued to run later. - // if a connection is dropped due to an error the reader will send a command to reconnect and retry later. - _debug!(inner, "Error writing command in pipeline: {:?}", e); - } + write_command_t!(inner, router, command)?; } Ok(()) } -/// Send ASKING to the provided server, then retry the provided command. +/// Send `ASKING` to the provided server, then retry the provided command. async fn process_ask( - inner: &RefCount, + inner: &RefCount, router: &mut Router, server: Server, slot: u16, - mut command: RedisCommand, -) -> Result<(), RedisError> { + mut command: Command, +) -> Result<(), Error> { command.use_replica = false; command.hasher = ClusterHash::Custom(slot); @@ -358,11 +257,21 @@ async fn process_ask( command.respond_to_caller(Err(e)); return Ok(()); } - if let Err(e) = utils::send_asking_with_policy(inner, router, &server, slot).await { + let attempts_remaining = command.attempts_remaining; + let asking_result = Box::pin(utils::send_asking_with_policy( + inner, + router, + &server, + slot, + attempts_remaining, + )) + .await; + if let Err(e) = asking_result { command.respond_to_caller(Err(e.clone())); return Err(e); } - if let Err(error) = write_with_backpressure_t(inner, router, command, false).await { + + if let Err(error) = write_command_t!(inner, router, command) { _debug!(inner, "Error sending command after ASKING: {:?}", error); Err(error) } else { @@ -372,16 +281,16 @@ async fn process_ask( /// Sync the cluster state then retry the command. 
async fn process_moved( - inner: &RefCount, + inner: &RefCount, router: &mut Router, server: Server, slot: u16, - mut command: RedisCommand, -) -> Result<(), RedisError> { + mut command: Command, +) -> Result<(), Error> { command.use_replica = false; command.hasher = ClusterHash::Custom(slot); - utils::delay_cluster_sync(inner).await?; + utils::delay_cluster_sync(inner, router).await?; _debug!(inner, "Syncing cluster after MOVED {} {}", slot, server); if let Err(e) = utils::sync_cluster_with_policy(inner, router).await { command.respond_to_caller(Err(e.clone())); @@ -391,7 +300,8 @@ async fn process_moved( command.respond_to_caller(Err(e)); return Ok(()); } - if let Err(error) = write_with_backpressure_t(inner, router, command, false).await { + + if let Err(error) = write_command_t!(inner, router, command) { _debug!(inner, "Error sending command after MOVED: {:?}", error); Err(error) } else { @@ -401,13 +311,15 @@ async fn process_moved( #[cfg(feature = "replicas")] async fn process_replica_reconnect( - inner: &RefCount, + inner: &RefCount, router: &mut Router, server: Option, force: bool, tx: Option, replica: bool, -) -> Result<(), RedisError> { +) -> Result<(), Error> { + router.reset_pending_reconnection(server.as_ref()); + #[allow(unused_mut)] if replica { let result = utils::sync_replicas_with_policy(inner, router, false).await; @@ -417,23 +329,24 @@ async fn process_replica_reconnect( Ok(()) } else { - process_reconnect(inner, router, server, force, tx).await + Box::pin(process_reconnect(inner, router, server, force, tx)).await } } /// Reconnect to the server(s). 
#[allow(unused_mut)] async fn process_reconnect( - inner: &RefCount, + inner: &RefCount, router: &mut Router, server: Option, force: bool, tx: Option, -) -> Result<(), RedisError> { +) -> Result<(), Error> { _debug!(inner, "Maybe reconnecting to {:?} (force: {})", server, force); + router.reset_pending_reconnection(server.as_ref()); if let Some(server) = server { - let has_connection = router.connections.has_server_connection(&server); + let has_connection = router.connections.has_server_connection(&server).await; _debug!(inner, "Has working connection: {}", has_connection); if has_connection && !force { @@ -446,7 +359,7 @@ async fn process_reconnect( } } - if !force && router.has_healthy_centralized_connection() { + if !force && router.has_healthy_centralized_connection().await { _debug!(inner, "Skip reconnecting to centralized host"); if let Some(mut tx) = tx { let _ = tx.send(Ok(Resp3Frame::Null)); @@ -455,7 +368,7 @@ async fn process_reconnect( } _debug!(inner, "Starting reconnection loop..."); - if let Err(e) = utils::reconnect_with_policy(inner, router).await { + if let Err(e) = Box::pin(utils::reconnect_with_policy(inner, router)).await { if let Some(mut tx) = tx { let _ = tx.send(Err(e.clone())); } @@ -473,11 +386,11 @@ async fn process_reconnect( #[cfg(feature = "replicas")] #[allow(unused_mut)] async fn process_sync_replicas( - inner: &RefCount, + inner: &RefCount, router: &mut Router, - mut tx: OneshotSender>, + mut tx: OneshotSender>, reset: bool, -) -> Result<(), RedisError> { +) -> Result<(), Error> { let result = utils::sync_replicas_with_policy(inner, router, reset).await; let _ = tx.send(result); Ok(()) @@ -486,68 +399,36 @@ async fn process_sync_replicas( /// Sync and update the cached cluster state. 
#[allow(unused_mut)] async fn process_sync_cluster( - inner: &RefCount, + inner: &RefCount, router: &mut Router, - mut tx: OneshotSender>, -) -> Result<(), RedisError> { + mut tx: OneshotSender>, +) -> Result<(), Error> { let result = utils::sync_cluster_with_policy(inner, router).await; let _ = tx.send(result.clone()); result } -/// Send a single command to the server(s). -async fn process_normal_command( - inner: &RefCount, - router: &mut Router, - command: RedisCommand, -) -> Result<(), RedisError> { - write_with_backpressure_t(inner, router, command, false).await -} - -/// Read the set of active connections managed by the client. -#[allow(unused_mut)] -fn process_connections( - inner: &RefCount, - router: &Router, - mut tx: OneshotSender>, -) -> Result<(), RedisError> { - #[allow(unused_mut)] - let mut connections = router.connections.active_connections(); - #[cfg(feature = "replicas")] - connections.extend(router.replicas.writers.keys().cloned()); - - _debug!(inner, "Active connections: {:?}", connections); - let _ = tx.send(connections); - Ok(()) -} - -/// Process any kind of router command. +/// Start processing commands from the client front end. 
async fn process_command( - inner: &RefCount, + inner: &RefCount, router: &mut Router, command: RouterCommand, -) -> Result<(), RedisError> { +) -> Result<(), Error> { + inner.counters.decr_cmd_buffer_len(); + + _trace!(inner, "Recv command: {:?}", command); match command { - RouterCommand::Ask { server, slot, command } => process_ask(inner, router, server, slot, command).await, - RouterCommand::Moved { server, slot, command } => process_moved(inner, router, server, slot, command).await, RouterCommand::SyncCluster { tx } => process_sync_cluster(inner, router, tx).await, #[cfg(feature = "transactions")] RouterCommand::Transaction { commands, - pipelined, id, tx, abort_on_error, - } => { - if pipelined { - transactions::exec::pipelined(inner, router, commands, id, tx).await - } else { - transactions::exec::non_pipelined(inner, router, commands, id, abort_on_error, tx).await - } - }, + } => Box::pin(transactions::send(inner, router, commands, id, abort_on_error, tx)).await, RouterCommand::Pipeline { commands } => process_pipeline(inner, router, commands).await, - RouterCommand::Command(command) => process_normal_command(inner, router, command).await, - RouterCommand::Connections { tx } => process_connections(inner, router, tx), + #[allow(unused_mut)] + RouterCommand::Command(mut command) => write_command_t!(inner, router, command), #[cfg(feature = "replicas")] RouterCommand::SyncReplicas { tx, reset } => process_sync_replicas(inner, router, tx, reset).await, #[cfg(not(feature = "replicas"))] @@ -559,41 +440,69 @@ async fn process_command( tx, replica, } => process_replica_reconnect(inner, router, server, force, tx, replica).await, + RouterCommand::Ask { server, slot, command } => process_ask(inner, router, server, slot, command).await, + RouterCommand::Moved { command, server, slot } => process_moved(inner, router, server, slot, command).await, } } -/// Start processing commands from the client front end. 
-async fn process_commands( - inner: &RefCount, +/// Try to read frames from any socket, otherwise try to write the next command. +async fn read_or_write( + inner: &RefCount, router: &mut Router, rx: &mut CommandReceiver, -) -> Result<(), RedisError> { - _debug!(inner, "Starting command processing stream..."); - while let Some(command) = rx.recv().await { - inner.counters.decr_cmd_buffer_len(); - - _trace!(inner, "Recv command: {:?}", command); - if let Err(e) = process_command(inner, router, command).await { - // errors on this interface end the client connection task - if e.is_canceled() { - break; - } else { - _error!(inner, "Disconnecting after error processing command: {:?}", e); - let _ = router.disconnect_all().await; - router.clear_retry_buffer(); - return Err(e); +) -> Result<(), Error> { + // The most complicated part of the main client command loop is implemented in this function. + // + // In the past `fred` worked by spawning a separate task for each connection such that the Tokio scheduler could + // read from all sockets concurrently. Unfortunately this introduced significant overhead in the scheduling + // layer (via a huge number of calls to `next_expiration` within Tokio) and indirectly via the added message passing + // communication mechanisms required between reader tasks and the writer task. + // + // In 9.5.0 the routing layer was reworked to operate on both readers and writers within a single task. This + // increased throughput by 2-3x on the happy path, but requires some `select` shenanigans so that the client can + // appear to operate on readers and writers concurrently. + // + // This function is called in a loop and drives futures that concurrently read and write to sockets. + + if inner.connection.unresponsive.max_timeout.is_some() { + let sleep_ft = sleep(inner.connection.unresponsive.interval); + pin!(sleep_ft); + + tokio::select! 
{ + biased; + results = router.select_read(inner) => { + for (server, result) in results.into_iter() { + utils::process_response(inner, router, &server, result).await?; + } + }, + Some(command) = rx.recv() => { + process_command(inner, router, command).await?; + }, + _ = sleep_ft => { + // break out and return early, starting another call to poll_next on all the sockets, + // which also performs unresponsive checks on each socket + return Ok(()); } - } + }; + } else { + tokio::select! { + biased; + results = router.select_read(inner) => { + for (server, result) in results.into_iter() { + utils::process_response(inner, router, &server, result).await?; + } + }, + Some(command) = rx.recv() => { + process_command(inner, router, command).await?; + }, + }; } - _debug!(inner, "Disconnecting after command stream closes."); - let _ = router.disconnect_all().await; - router.clear_retry_buffer(); Ok(()) } /// Start the command processing stream, initiating new connections in the process. -pub async fn start(inner: &RefCount) -> Result<(), RedisError> { +pub async fn start(inner: &RefCount) -> Result<(), Error> { #[cfg(feature = "mocks")] if let Some(ref mocks) = inner.config.mocks { return mocking::start(inner, mocks).await; @@ -604,8 +513,8 @@ pub async fn start(inner: &RefCount) -> Result<(), RedisError> None => { // the `_lock` field on inner synchronizes the getters/setters on the command channel halves, so if this field // is None then another task must have set and removed the receiver concurrently. 
- return Err(RedisError::new( - RedisErrorKind::Config, + return Err(Error::new( + ErrorKind::Config, "Another connection task is already running.", )); }, @@ -615,17 +524,17 @@ pub async fn start(inner: &RefCount) -> Result<(), RedisError> let mut router = Router::new(inner); _debug!(inner, "Initializing router with policy: {:?}", inner.reconnect_policy()); let result = if inner.config.fail_fast { - if let Err(e) = Box::pin(router.connect()).await { + if let Err(e) = Box::pin(router.connect(inner)).await { inner.notifications.broadcast_connect(Err(e.clone())); - inner.notifications.broadcast_error(e.clone()); + inner.notifications.broadcast_error(e.clone(), None); Err(e) } else { - client_utils::set_client_state(&inner.state, ClientState::Connected); + inner.set_client_state(ClientState::Connected); inner.notifications.broadcast_connect(Ok(())); Ok(()) } } else { - utils::reconnect_with_policy(inner, &mut router).await + Box::pin(utils::reconnect_with_policy(inner, &mut router)).await }; if let Err(error) = result { @@ -635,7 +544,19 @@ pub async fn start(inner: &RefCount) -> Result<(), RedisError> #[cfg(feature = "credential-provider")] inner.reset_credential_refresh_task(); - let result = Box::pin(process_commands(inner, &mut router, &mut rx)).await; + let mut result = Ok(()); + loop { + if let Err(err) = read_or_write(inner, &mut router, &mut rx).await { + _debug!(inner, "Error processing command: {:?}", err); + router.clear_retry_buffer(); + let _ = router.disconnect_all(inner).await; + + if !err.is_canceled() { + result = Err(err); + } + break; + } + } inner.store_command_rx(rx, false); #[cfg(feature = "credential-provider")] inner.abort_credential_refresh_task(); @@ -655,7 +576,7 @@ mod mocking { use std::sync::Arc; /// Process any kind of router command. 
- pub fn process_command(mocks: &Arc, command: RouterCommand) -> Result<(), RedisError> { + pub fn process_command(mocks: &Arc, command: RouterCommand) -> Result<(), Error> { match command { #[cfg(feature = "transactions")] RouterCommand::Transaction { commands, mut tx, .. } => { @@ -720,15 +641,15 @@ mod mocking { Ok(()) }, - _ => Err(RedisError::new(RedisErrorKind::Unknown, "Unimplemented.")), + _ => Err(Error::new(ErrorKind::Unknown, "Unimplemented.")), } } pub async fn process_commands( - inner: &RefCount, + inner: &RefCount, mocks: &Arc, rx: &mut CommandReceiver, - ) -> Result<(), RedisError> { + ) -> Result<(), Error> { while let Some(command) = rx.recv().await { inner.counters.decr_cmd_buffer_len(); @@ -747,7 +668,7 @@ mod mocking { Ok(()) } - pub async fn start(inner: &RefCount, mocks: &Arc) -> Result<(), RedisError> { + pub async fn start(inner: &RefCount, mocks: &Arc) -> Result<(), Error> { _debug!(inner, "Starting mocking layer"); #[cfg(feature = "glommio")] @@ -757,12 +678,7 @@ mod mocking { let mut rx = match inner.take_command_rx() { Some(rx) => rx, - None => { - return Err(RedisError::new( - RedisErrorKind::Config, - "Redis client is already initialized.", - )) - }, + None => return Err(Error::new(ErrorKind::Config, "Redis client is already initialized.")), }; inner.notifications.broadcast_connect(Ok(())); diff --git a/src/router/connections.rs b/src/router/connections.rs new file mode 100644 index 00000000..3382d909 --- /dev/null +++ b/src/router/connections.rs @@ -0,0 +1,389 @@ +use crate::{ + error::{Error, ErrorKind}, + modules::inner::ClientInner, + protocol::{ + command::Command, + connection, + connection::{Connection, Counters}, + types::ClusterRouting, + }, + router::{centralized, clustered, sentinel}, + runtime::RefCount, + types::config::Server, +}; +use futures::future::try_join_all; +use semver::Version; +use std::collections::{HashMap, VecDeque}; + +/// Connection maps for the supported deployment types. 
+pub enum Connections { + Centralized { + /// The connection to the primary server. + connection: Option, + }, + Clustered { + /// The cached cluster routing table used for mapping keys to server IDs. + cache: ClusterRouting, + /// A map of server IDs and connections. + connections: HashMap, + }, + Sentinel { + /// The connection to the primary server. + connection: Option, + }, +} + +impl Connections { + pub fn new_centralized() -> Self { + Connections::Centralized { connection: None } + } + + pub fn new_sentinel() -> Self { + Connections::Sentinel { connection: None } + } + + pub fn new_clustered() -> Self { + Connections::Clustered { + cache: ClusterRouting::new(), + connections: HashMap::new(), + } + } + + /// Discover and return a mapping of replica nodes to their associated primary node. + #[cfg(feature = "replicas")] + pub async fn replica_map(&mut self, inner: &RefCount) -> Result, Error> { + Ok(match self { + Connections::Centralized { + connection: ref mut writer, + } + | Connections::Sentinel { + connection: ref mut writer, + } => { + if let Some(writer) = writer { + connection::discover_replicas(inner, writer) + .await? + .into_iter() + .map(|replica| (replica, writer.server.clone())) + .collect() + } else { + HashMap::new() + } + }, + Connections::Clustered { + connections: ref writers, + .. + } => { + let mut out = HashMap::with_capacity(writers.len()); + + for primary in writers.keys() { + let replicas = inner + .with_cluster_state(|state| Ok(state.replicas(primary))) + .ok() + .unwrap_or_default(); + + for replica in replicas.into_iter() { + out.insert(replica, primary.clone()); + } + } + out + }, + }) + } + + /// Whether the connection map has a connection to the provided server`. 
+ pub async fn has_server_connection(&mut self, server: &Server) -> bool { + match self { + Connections::Centralized { + connection: ref mut writer, + } + | Connections::Sentinel { + connection: ref mut writer, + } => { + if let Some(writer) = writer.as_mut() { + if writer.server == *server { + writer.peek_reader_errors().await.is_none() + } else { + false + } + } else { + false + } + }, + Connections::Clustered { + connections: ref mut writers, + .. + } => { + for (_, writer) in writers.iter_mut() { + if writer.server == *server { + return writer.peek_reader_errors().await.is_none(); + } + } + + false + }, + } + } + + /// Get the connection writer half for the provided server. + pub fn get_connection_mut(&mut self, server: &Server) -> Option<&mut Connection> { + match self { + Connections::Centralized { + connection: ref mut writer, + } => writer + .as_mut() + .and_then(|writer| if writer.server == *server { Some(writer) } else { None }), + Connections::Sentinel { + connection: ref mut writer, + } => writer + .as_mut() + .and_then(|writer| if writer.server == *server { Some(writer) } else { None }), + Connections::Clustered { + connections: ref mut writers, + .. + } => writers.get_mut(server), + } + } + + /// Initialize the underlying connection(s) and update the cached backchannel information. 
+ pub async fn initialize( + &mut self, + inner: &RefCount, + buffer: &mut VecDeque, + ) -> Result<(), Error> { + let result = if inner.config.server.is_clustered() { + Box::pin(clustered::initialize_connections(inner, self, buffer)).await + } else if inner.config.server.is_centralized() || inner.config.server.is_unix_socket() { + Box::pin(centralized::initialize_connection(inner, self, buffer)).await + } else if inner.config.server.is_sentinel() { + Box::pin(sentinel::initialize_connection(inner, self, buffer)).await + } else { + return Err(Error::new(ErrorKind::Config, "Invalid client configuration.")); + }; + + if result.is_ok() { + if let Some(version) = self.server_version() { + inner.server_state.write().kind.set_server_version(version); + } + + inner.backchannel.update_connection_ids(self); + } + result + } + + /// Read the counters associated with a connection to a server. + pub fn counters(&self, server: Option<&Server>) -> Option<&Counters> { + match self { + Connections::Centralized { connection: ref writer } => writer.as_ref().map(|w| &w.counters), + Connections::Sentinel { + connection: ref writer, .. + } => writer.as_ref().map(|w| &w.counters), + Connections::Clustered { + connections: ref writers, + .. + } => server.and_then(|server| writers.get(server).map(|w| &w.counters)), + } + } + + /// Read the server version, if known. + pub fn server_version(&self) -> Option { + match self { + Connections::Centralized { connection: ref writer } => writer.as_ref().and_then(|w| w.version.clone()), + Connections::Clustered { + connections: ref writers, + .. + } => writers.iter().find_map(|(_, w)| w.version.clone()), + Connections::Sentinel { + connection: ref writer, .. + } => writer.as_ref().and_then(|w| w.version.clone()), + } + } + + pub fn take_connection(&mut self, server: Option<&Server>) -> Option { + match self { + Connections::Centralized { + connection: ref mut writer, + } => writer.take(), + Connections::Sentinel { + connection: ref mut writer, + .. 
+ } => writer.take(), + Connections::Clustered { + connections: ref mut writers, + .. + } => server.and_then(|server| writers.remove(server)), + } + } + + /// Disconnect from the provided server, using the default centralized connection if `None` is provided. + pub async fn disconnect(&mut self, inner: &RefCount, server: Option<&Server>) -> VecDeque { + match self { + Connections::Centralized { + connection: ref mut writer, + } => { + if let Some(mut writer) = writer.take() { + _debug!(inner, "Disconnecting from {}", writer.server); + writer.close().await + } else { + VecDeque::new() + } + }, + Connections::Clustered { + connections: ref mut writers, + .. + } => { + let mut out = VecDeque::new(); + + if let Some(server) = server { + if let Some(mut writer) = writers.remove(server) { + _debug!(inner, "Disconnecting from {}", writer.server); + let commands = writer.close().await; + out.extend(commands); + } + } + out.into_iter().collect() + }, + Connections::Sentinel { + connection: ref mut writer, + } => { + if let Some(mut writer) = writer.take() { + _debug!(inner, "Disconnecting from {}", writer.server); + writer.close().await + } else { + VecDeque::new() + } + }, + } + } + + /// Disconnect and clear local state for all connections, returning all in-flight commands. + pub async fn disconnect_all(&mut self, inner: &RefCount) -> VecDeque { + match self { + Connections::Centralized { + connection: ref mut writer, + } => { + if let Some(mut writer) = writer.take() { + _debug!(inner, "Disconnecting from {}", writer.server); + writer.close().await + } else { + VecDeque::new() + } + }, + Connections::Clustered { + connections: ref mut writers, + .. 
+ } => { + let mut out = VecDeque::new(); + for (_, mut writer) in writers.drain() { + _debug!(inner, "Disconnecting from {}", writer.server); + let commands = writer.close().await; + out.extend(commands.into_iter()); + } + out.into_iter().collect() + }, + Connections::Sentinel { + connection: ref mut writer, + } => { + if let Some(mut writer) = writer.take() { + _debug!(inner, "Disconnecting from {}", writer.server); + writer.close().await + } else { + VecDeque::new() + } + }, + } + } + + /// Read a map of connection IDs (via `CLIENT ID`) for each inner connections. + pub fn connection_ids(&self) -> HashMap { + let mut out = HashMap::new(); + + match self { + Connections::Centralized { connection: writer } => { + if let Some(writer) = writer { + if let Some(id) = writer.id { + out.insert(writer.server.clone(), id); + } + } + }, + Connections::Sentinel { connection: writer, .. } => { + if let Some(writer) = writer { + if let Some(id) = writer.id { + out.insert(writer.server.clone(), id); + } + } + }, + Connections::Clustered { + connections: writers, .. + } => { + for (server, writer) in writers.iter() { + if let Some(id) = writer.id { + out.insert(server.clone(), id); + } + } + }, + } + + out + } + + /// Flush the socket(s) associated with each server if they have pending frames. + pub async fn flush(&mut self) -> Result<(), Error> { + match self { + Connections::Centralized { + connection: ref mut writer, + } => { + if let Some(writer) = writer { + writer.flush().await + } else { + Ok(()) + } + }, + Connections::Sentinel { + connection: ref mut writer, + .. + } => { + if let Some(writer) = writer { + writer.flush().await + } else { + Ok(()) + } + }, + Connections::Clustered { + connections: ref mut writers, + .. + } => try_join_all(writers.values_mut().map(|writer| writer.flush())) + .await + .map(|_| ()), + } + } + + /// Check if the provided `server` node owns the provided `slot`. 
+ pub fn check_cluster_owner(&self, slot: u16, server: &Server) -> bool { + match self { + Connections::Clustered { ref cache, .. } => cache + .get_server(slot) + .map(|owner| { + trace!("Comparing cached cluster owner for {}: {} == {}", slot, owner, server); + owner == server + }) + .unwrap_or(false), + _ => false, + } + } + + /// Connect or reconnect to the provided `host:port`. + pub async fn add_connection(&mut self, inner: &RefCount, server: &Server) -> Result<(), Error> { + if let Connections::Clustered { + connections: ref mut writers, + .. + } = self + { + let mut transport = connection::create(inner, server, None).await?; + transport.setup(inner, None).await?; + writers.insert(server.clone(), transport.into_pipelined(false)); + Ok(()) + } else { + Err(Error::new(ErrorKind::Config, "Expected clustered configuration.")) + } + } +} diff --git a/src/router/mod.rs b/src/router/mod.rs index 6cbf0c9b..bfc3dcc5 100644 --- a/src/router/mod.rs +++ b/src/router/mod.rs @@ -1,504 +1,54 @@ +pub mod centralized; +pub mod clustered; +pub mod commands; +pub mod connections; +#[cfg(feature = "replicas")] +pub mod replicas; +pub mod responses; +pub mod sentinel; +pub mod types; +pub mod utils; + use crate::{ - error::{RedisError, RedisErrorKind}, - modules::inner::RedisClientInner, + error::Error, + modules::inner::ClientInner, protocol::{ - command::{RedisCommand, RouterReceiver}, - connection::{self, CommandBuffer, Counters, RedisWriter}, - types::{ClusterRouting, Server}, + command::Command, + connection::{Connection, Counters}, + types::Server, + }, + router::{ + connections::Connections, + types::{ReadAllFuture, ReadFuture}, }, runtime::RefCount, - trace, + types::Resp3Frame, utils as client_utils, }; -use futures::future::try_join_all; -use semver::Version; +use futures::future::join_all; use std::{ - collections::{HashMap, VecDeque}, - fmt, - fmt::Formatter, - time::Duration, + collections::{HashSet, VecDeque}, + hash::{Hash, Hasher}, }; -#[cfg(feature = 
"transactions")] -use crate::runtime::oneshot_channel; -#[cfg(feature = "transactions")] -use crate::{protocol::command::ClusterErrorKind, protocol::responders::ResponseKind}; #[cfg(feature = "replicas")] -use std::collections::HashSet; - -pub mod centralized; -pub mod clustered; -pub mod commands; -pub mod replicas; -pub mod responses; -pub mod sentinel; -pub mod types; -pub mod utils; - +use futures::future::try_join; #[cfg(feature = "transactions")] pub mod transactions; - #[cfg(feature = "replicas")] -use crate::router::replicas::Replicas; - -/// The result of an attempt to send a command to the server. -// This is not an ideal pattern, but it mostly comes from the requirement that the shared buffer interface take -// ownership over the command. -pub enum Written { - /// Apply backpressure to the command before retrying. - Backpressure((RedisCommand, Backpressure)), - /// Indicates that the command was sent to the associated server and whether the socket was flushed. - Sent((Server, bool)), - /// Indicates that the command was sent to all servers. - SentAll, - /// The command could not be written since the connection is down. - Disconnected((Option, Option, RedisError)), - /// Ignore the result and move on to the next command. - Ignore, - /// The command could not be routed to any server. - NotFound(RedisCommand), - /// A fatal error that should interrupt the router. - Error((RedisError, Option)), - /// Restart the write process on a primary node connection. 
- #[cfg(feature = "replicas")] - Fallback(RedisCommand), -} - -impl fmt::Display for Written { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}", match self { - Written::Backpressure(_) => "Backpressure", - Written::Sent(_) => "Sent", - Written::SentAll => "SentAll", - Written::Disconnected(_) => "Disconnected", - Written::Ignore => "Ignore", - Written::NotFound(_) => "NotFound", - Written::Error(_) => "Error", - #[cfg(feature = "replicas")] - Written::Fallback(_) => "Fallback", - }) - } -} +use replicas::Replicas; -pub enum Backpressure { - /// The amount of time to wait. - Wait(Duration), - /// Block the client until the command receives a response. - Block, - /// Return a backpressure error to the caller of the command. - Error(RedisError), +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum ReconnectServer { + All, + One(Server), } -impl Backpressure { - /// Apply the backpressure policy. - pub async fn wait( - self, - inner: &RefCount, - command: &mut RedisCommand, - ) -> Result, RedisError> { +impl Hash for ReconnectServer { + fn hash(&self, state: &mut H) { match self { - Backpressure::Error(e) => Err(e), - Backpressure::Wait(duration) => { - _debug!(inner, "Backpressure policy (wait): {:?}", duration); - trace::backpressure_event(command, Some(duration.as_millis())); - inner.wait_with_interrupt(duration).await?; - Ok(None) - }, - Backpressure::Block => { - _debug!(inner, "Backpressure (block)"); - trace::backpressure_event(command, None); - if !command.has_router_channel() { - _trace!( - inner, - "Blocking router for backpressure for {}", - command.kind.to_str_debug() - ); - command.skip_backpressure = true; - Ok(Some(command.create_router_channel())) - } else { - Ok(None) - } - }, - } - } -} - -/// Connection maps for the supported deployment types. -pub enum Connections { - Centralized { - /// The connection to the primary server. 
- writer: Option, - }, - Clustered { - /// The cached cluster routing table used for mapping keys to server IDs. - cache: ClusterRouting, - /// A map of server IDs and connections. - writers: HashMap, - }, - Sentinel { - /// The connection to the primary server. - writer: Option, - }, -} - -impl Connections { - pub fn new_centralized() -> Self { - Connections::Centralized { writer: None } - } - - pub fn new_sentinel() -> Self { - Connections::Sentinel { writer: None } - } - - pub fn new_clustered() -> Self { - Connections::Clustered { - cache: ClusterRouting::new(), - writers: HashMap::new(), - } - } - - /// Discover and return a mapping of replica nodes to their associated primary node. - #[cfg(feature = "replicas")] - pub async fn replica_map( - &mut self, - inner: &RefCount, - ) -> Result, RedisError> { - Ok(match self { - Connections::Centralized { ref mut writer } | Connections::Sentinel { ref mut writer } => { - if let Some(writer) = writer { - writer - .discover_replicas(inner) - .await? - .into_iter() - .map(|replica| (replica, writer.server.clone())) - .collect() - } else { - HashMap::new() - } - }, - Connections::Clustered { ref writers, .. } => { - let mut out = HashMap::with_capacity(writers.len()); - - for primary in writers.keys() { - let replicas = inner - .with_cluster_state(|state| Ok(state.replicas(primary))) - .ok() - .unwrap_or_default(); - - for replica in replicas.into_iter() { - out.insert(replica, primary.clone()); - } - } - out - }, - }) - } - - /// Whether the connection map has a connection to the provided server`. - pub fn has_server_connection(&mut self, server: &Server) -> bool { - match self { - Connections::Centralized { ref mut writer } | Connections::Sentinel { ref mut writer } => { - if let Some(writer) = writer.as_mut() { - if writer.server == *server { - writer.is_working() - } else { - false - } - } else { - false - } - }, - Connections::Clustered { ref mut writers, .. 
} => { - for (_, writer) in writers.iter_mut() { - if writer.server == *server { - return writer.is_working(); - } - } - - false - }, - } - } - - /// Get the connection writer half for the provided server. - pub fn get_connection_mut(&mut self, server: &Server) -> Option<&mut RedisWriter> { - match self { - Connections::Centralized { ref mut writer } => { - writer - .as_mut() - .and_then(|writer| if writer.server == *server { Some(writer) } else { None }) - }, - Connections::Sentinel { ref mut writer } => { - writer - .as_mut() - .and_then(|writer| if writer.server == *server { Some(writer) } else { None }) - }, - Connections::Clustered { ref mut writers, .. } => writers.get_mut(server), - } - } - - /// Initialize the underlying connection(s) and update the cached backchannel information. - pub async fn initialize( - &mut self, - inner: &RefCount, - buffer: &mut VecDeque, - ) -> Result<(), RedisError> { - let result = if inner.config.server.is_clustered() { - Box::pin(clustered::initialize_connections(inner, self, buffer)).await - } else if inner.config.server.is_centralized() || inner.config.server.is_unix_socket() { - Box::pin(centralized::initialize_connection(inner, self, buffer)).await - } else if inner.config.server.is_sentinel() { - Box::pin(sentinel::initialize_connection(inner, self, buffer)).await - } else { - return Err(RedisError::new(RedisErrorKind::Config, "Invalid client configuration.")); - }; - - // TODO clean this up - if result.is_ok() { - if let Some(version) = self.server_version() { - inner.server_state.write().kind.set_server_version(version); - } - - let mut backchannel = inner.backchannel.write().await; - backchannel.connection_ids = self.connection_ids(); - } - result - } - - /// Read the counters associated with a connection to a server. 
- pub fn counters(&self, server: Option<&Server>) -> Option<&Counters> { - match self { - Connections::Centralized { ref writer } => writer.as_ref().map(|w| &w.counters), - Connections::Sentinel { ref writer, .. } => writer.as_ref().map(|w| &w.counters), - Connections::Clustered { ref writers, .. } => { - server.and_then(|server| writers.get(server).map(|w| &w.counters)) - }, - } - } - - /// Read the server version, if known. - pub fn server_version(&self) -> Option { - match self { - Connections::Centralized { ref writer } => writer.as_ref().and_then(|w| w.version.clone()), - Connections::Clustered { ref writers, .. } => writers.iter().find_map(|(_, w)| w.version.clone()), - Connections::Sentinel { ref writer, .. } => writer.as_ref().and_then(|w| w.version.clone()), - } - } - - /// Disconnect from the provided server, using the default centralized connection if `None` is provided. - pub async fn disconnect(&mut self, inner: &RefCount, server: Option<&Server>) -> CommandBuffer { - match self { - Connections::Centralized { ref mut writer } => { - if let Some(writer) = writer.take() { - _debug!(inner, "Disconnecting from {}", writer.server); - writer.graceful_close().await - } else { - Vec::new() - } - }, - Connections::Clustered { ref mut writers, .. } => { - let mut out = VecDeque::new(); - - if let Some(server) = server { - if let Some(writer) = writers.remove(server) { - _debug!(inner, "Disconnecting from {}", writer.server); - let commands = writer.graceful_close().await; - out.extend(commands); - } - } - out.into_iter().collect() - }, - Connections::Sentinel { ref mut writer } => { - if let Some(writer) = writer.take() { - _debug!(inner, "Disconnecting from {}", writer.server); - writer.graceful_close().await - } else { - Vec::new() - } - }, - } - } - - /// Disconnect and clear local state for all connections, returning all in-flight commands. 
- pub async fn disconnect_all(&mut self, inner: &RefCount) -> CommandBuffer { - match self { - Connections::Centralized { ref mut writer } => { - if let Some(writer) = writer.take() { - _debug!(inner, "Disconnecting from {}", writer.server); - writer.graceful_close().await - } else { - Vec::new() - } - }, - Connections::Clustered { ref mut writers, .. } => { - let mut out = VecDeque::new(); - for (_, writer) in writers.drain() { - _debug!(inner, "Disconnecting from {}", writer.server); - let commands = writer.graceful_close().await; - out.extend(commands.into_iter()); - } - out.into_iter().collect() - }, - Connections::Sentinel { ref mut writer } => { - if let Some(writer) = writer.take() { - _debug!(inner, "Disconnecting from {}", writer.server); - writer.graceful_close().await - } else { - Vec::new() - } - }, - } - } - - /// Read a map of connection IDs (via `CLIENT ID`) for each inner connections. - pub fn connection_ids(&self) -> HashMap { - let mut out = HashMap::new(); - - match self { - Connections::Centralized { writer } => { - if let Some(writer) = writer { - if let Some(id) = writer.id { - out.insert(writer.server.clone(), id); - } - } - }, - Connections::Sentinel { writer, .. } => { - if let Some(writer) = writer { - if let Some(id) = writer.id { - out.insert(writer.server.clone(), id); - } - } - }, - Connections::Clustered { writers, .. } => { - for (server, writer) in writers.iter() { - if let Some(id) = writer.id { - out.insert(server.clone(), id); - } - } - }, - } - - out - } - - /// Flush the socket(s) associated with each server if they have pending frames. - pub async fn check_and_flush(&mut self, inner: &RefCount) -> Result<(), RedisError> { - _trace!(inner, "Checking and flushing sockets..."); - - match self { - Connections::Centralized { ref mut writer } => { - if let Some(writer) = writer { - writer.flush().await - } else { - Ok(()) - } - }, - Connections::Sentinel { ref mut writer, .. 
} => { - if let Some(writer) = writer { - writer.flush().await - } else { - Ok(()) - } - }, - Connections::Clustered { ref mut writers, .. } => { - try_join_all(writers.values_mut().map(|writer| writer.flush())) - .await - .map(|_| ()) - }, - } - } - - /// Send a command to the server(s). - pub async fn write( - &mut self, - inner: &RefCount, - command: RedisCommand, - force_flush: bool, - ) -> Written { - match self { - Connections::Clustered { - ref mut writers, - ref mut cache, - } => clustered::write(inner, writers, cache, command, force_flush).await, - Connections::Centralized { ref mut writer } => centralized::write(inner, writer, command, force_flush).await, - Connections::Sentinel { ref mut writer, .. } => centralized::write(inner, writer, command, force_flush).await, - } - } - - /// Send a command to all servers in a cluster. - pub async fn write_all_cluster(&mut self, inner: &RefCount, command: RedisCommand) -> Written { - if let Connections::Clustered { ref mut writers, .. } = self { - if let Err(error) = clustered::send_all_cluster_command(inner, writers, command).await { - Written::Disconnected((None, None, error)) - } else { - Written::SentAll - } - } else { - Written::Error(( - RedisError::new(RedisErrorKind::Config, "Expected clustered configuration."), - None, - )) - } - } - - /// Check if the provided `server` node owns the provided `slot`. - pub fn check_cluster_owner(&self, slot: u16, server: &Server) -> bool { - match self { - Connections::Clustered { ref cache, .. } => cache - .get_server(slot) - .map(|owner| { - trace!("Comparing cached cluster owner for {}: {} == {}", slot, owner, server); - owner == server - }) - .unwrap_or(false), - _ => false, - } - } - - /// Connect or reconnect to the provided `host:port`. - pub async fn add_connection( - &mut self, - inner: &RefCount, - server: &Server, - ) -> Result<(), RedisError> { - if let Connections::Clustered { ref mut writers, .. 
} = self { - let mut transport = connection::create(inner, server, None).await?; - transport.setup(inner, None).await?; - - let (server, writer) = connection::split(inner, transport, false, clustered::spawn_reader_task)?; - writers.insert(server, writer); - Ok(()) - } else { - Err(RedisError::new( - RedisErrorKind::Config, - "Expected clustered configuration.", - )) - } - } - - /// Read the list of active/working connections. - pub fn active_connections(&self) -> Vec { - match self { - Connections::Clustered { ref writers, .. } => writers - .iter() - .filter_map(|(server, writer)| { - if writer.is_working() { - Some(server.clone()) - } else { - None - } - }) - .collect(), - Connections::Centralized { ref writer } | Connections::Sentinel { ref writer, .. } => writer - .as_ref() - .and_then(|writer| { - if writer.is_working() { - Some(vec![writer.server.clone()]) - } else { - None - } - }) - .unwrap_or(Vec::new()), + ReconnectServer::All => "all".hash(state), + ReconnectServer::One(server) => server.hash(state), } } } @@ -506,19 +56,19 @@ impl Connections { /// A struct for routing commands to the server(s). pub struct Router { /// The connection map for each deployment type. - pub connections: Connections, - /// The inner client state associated with the router. - pub inner: RefCount, + pub connections: Connections, /// Storage for commands that should be deferred or retried later. - pub buffer: VecDeque, + pub retry_buffer: VecDeque, + /// A set to dedup pending reconnection commands. + pub pending_reconnection: HashSet, /// The replica routing interface. #[cfg(feature = "replicas")] - pub replicas: Replicas, + pub replicas: Replicas, } impl Router { /// Create a new `Router` without connecting to the server(s). 
- pub fn new(inner: &RefCount) -> Self { + pub fn new(inner: &RefCount) -> Self { let connections = if inner.config.server.is_clustered() { Connections::new_clustered() } else if inner.config.server.is_sentinel() { @@ -528,212 +78,204 @@ impl Router { }; Router { - buffer: VecDeque::new(), - inner: inner.clone(), + retry_buffer: VecDeque::new(), + pending_reconnection: HashSet::new(), connections, #[cfg(feature = "replicas")] replicas: Replicas::new(), } } - /// Read the server that should receive the provided command. - #[cfg(any(feature = "transactions", feature = "replicas"))] - pub fn find_connection(&self, command: &RedisCommand) -> Option<&Server> { + /// Find the primary node that owns the hash slot used by the command. + #[cfg(feature = "replicas")] + pub fn cluster_owner(&self, command: &Command) -> Option<&Server> { match self.connections { - Connections::Centralized { ref writer } => writer.as_ref().map(|w| &w.server), - Connections::Sentinel { ref writer } => writer.as_ref().map(|w| &w.server), Connections::Clustered { ref cache, .. } => command.cluster_hash().and_then(|slot| cache.get_server(slot)), + _ => None, } } - pub fn has_healthy_centralized_connection(&self) -> bool { - match self.connections { - Connections::Centralized { ref writer } | Connections::Sentinel { ref writer } => { - writer.as_ref().map(|w| w.is_working()).unwrap_or(false) + /// Whether a deferred reconnection command exists for the provided server. + pub fn has_pending_reconnection(&self, server: &Option<&Server>) -> bool { + match server { + Some(server) => { + self.pending_reconnection.contains(&ReconnectServer::All) + || self + .pending_reconnection + .contains(&ReconnectServer::One((*server).clone())) }, - Connections::Clustered { .. } => false, + None => self.pending_reconnection.contains(&ReconnectServer::All), } } - /// Attempt to send the command to the server. 
- pub async fn write(&mut self, command: RedisCommand, force_flush: bool) -> Written { - let send_all_cluster_nodes = - self.inner.config.server.is_clustered() && (command.is_all_cluster_nodes() || command.kind.closes_connection()); - - if command.write_attempts >= 1 { - self.inner.counters.incr_redelivery_count(); - } - if send_all_cluster_nodes { - self.connections.write_all_cluster(&self.inner, command).await + pub fn reset_pending_reconnection(&mut self, server: Option<&Server>) { + if let Some(server) = server { + self.pending_reconnection.remove(&ReconnectServer::One(server.clone())); } else { - self.connections.write(&self.inner, command, force_flush).await + self.pending_reconnection.clear(); } } - /// Write a command to a replica node if possible, falling back to a primary node if configured. + /// Find the connection that should receive the provided command. #[cfg(feature = "replicas")] - pub async fn write_replica(&mut self, mut command: RedisCommand, force_flush: bool) -> Written { - if !command.use_replica { - return self.write(command, force_flush).await; - } - - let primary = match self.find_connection(&command) { - Some(server) => server.clone(), + pub fn route(&mut self, command: &Command) -> Option<&mut Connection> { + if command.is_all_cluster_nodes() { + return None; + } + + match command.cluster_node.as_ref() { + Some(server) => { + if command.use_replica { + self + .replicas + .routing + .next_replica(server) + .and_then(|replica| self.replicas.connections.get_mut(replica)) + } else { + self.connections.get_connection_mut(server) + } + }, None => { - return if self.inner.connection.replica.primary_fallback { - debug!( - "{}: Fallback to primary node connection for {} ({})", - self.inner.id, - command.kind.to_str_debug(), - command.debug_id() - ); - - command.use_replica = false; - self.write(command, force_flush).await + if command.use_replica { + match self.cluster_owner(command).cloned() { + Some(primary) => match 
self.replicas.routing.next_replica(&primary) { + Some(replica) => self.replicas.connections.get_mut(replica), + None => None, + }, + None => None, + } } else { - command.finish( - &self.inner, - Err(RedisError::new( - RedisErrorKind::Replica, - "Missing primary node connection.", - )), - ); - - Written::Ignore + match self.connections { + Connections::Centralized { + connection: ref mut writer, + } => writer.as_mut(), + Connections::Sentinel { + connection: ref mut writer, + } => writer.as_mut(), + Connections::Clustered { + connections: ref mut writers, + ref cache, + } => { + let server = command.cluster_hash().and_then(|slot| cache.get_server(slot)); + let has_server = server.map(|server| writers.contains_key(server)).unwrap_or(false); + + if has_server { + server.and_then(|server| writers.get_mut(server)) + } else { + writers.values_mut().next() + } + }, + } } }, - }; - - let result = self.replicas.write(&self.inner, &primary, command, force_flush).await; - match result { - Written::Fallback(mut command) => { - debug!( - "{}: Fall back to primary node for {} ({}) after replica error", - self.inner.id, - command.kind.to_str_debug(), - command.debug_id(), - ); + } + } - command.use_replica = false; - self.write(command, force_flush).await + /// Find the connection that should receive the provided command. + #[cfg(not(feature = "replicas"))] + pub fn route<'a>(&'a mut self, command: &Command) -> Option<&'a mut Connection> { + if command.is_all_cluster_nodes() { + return None; + } + + match command.cluster_node.as_ref() { + Some(server) => self.connections.get_connection_mut(server), + None => match self.connections { + Connections::Centralized { + connection: ref mut writer, + .. + } => writer.as_mut(), + Connections::Sentinel { + connection: ref mut writer, + .. 
+ } => writer.as_mut(), + Connections::Clustered { + connections: ref mut writers, + ref cache, + } => { + let server = command.cluster_hash().and_then(|slot| cache.get_server(slot)); + let has_server = server.map(|server| writers.contains_key(server)).unwrap_or(false); + + if has_server { + server.and_then(|server| writers.get_mut(server)) + } else { + writers.values_mut().next() + } + }, }, - _ => result, } } - /// Write a command to a replica node if possible, falling back to a primary node if configured. - #[cfg(not(feature = "replicas"))] - pub async fn write_replica(&mut self, command: RedisCommand, force_flush: bool) -> Written { - self.write(command, force_flush).await + #[cfg(feature = "replicas")] + pub fn get_connection_mut(&mut self, server: &Server) -> Option<&mut Connection> { + self + .connections + .get_connection_mut(server) + .or_else(|| self.replicas.connections.get_mut(server)) } - /// Attempt to write the command to a specific server without backpressure. - pub async fn write_direct(&mut self, mut command: RedisCommand, server: &Server) -> Written { - debug!( - "{}: Direct write `{}` command to {}, ID: {}", - self.inner.id, - command.kind.to_str_debug(), - server, - command.debug_id() - ); - - let writer = match self.connections.get_connection_mut(server) { - Some(writer) => writer, - None => { - trace!("{}: Missing connection to {}", self.inner.id, server); - return Written::NotFound(command); - }, - }; - let frame = match utils::prepare_command(&self.inner, &writer.counters, &mut command) { - Ok((frame, _)) => frame, - Err(e) => { - warn!( - "{}: Frame encoding error for {}", - self.inner.id, - command.kind.to_str_debug() - ); - // do not retry commands that trigger frame encoding errors - command.finish(&self.inner, Err(e)); - return Written::Ignore; - }, - }; - let blocks_connection = command.blocks_connection(); - command.write_attempts += 1; + #[cfg(not(feature = "replicas"))] + pub fn get_connection_mut(&mut self, server: &Server) -> 
Option<&mut Connection> { + self.connections.get_connection_mut(server) + } - if !writer.is_working() { - let error = RedisError::new(RedisErrorKind::IO, "Connection closed."); - debug!("{}: Error sending command: {:?}", self.inner.id, error); - return Written::Disconnected((Some(writer.server.clone()), Some(command), error)); - } + #[cfg(feature = "replicas")] + pub fn take_connection(&mut self, server: &Server) -> Option { + self + .connections + .take_connection(Some(server)) + .or_else(|| self.replicas.connections.remove(server)) + } - let no_incr = command.has_no_responses(); - writer.push_command(&self.inner, command); - if let Err(err) = writer.write_frame(frame, true, no_incr).await { - Written::Disconnected((Some(writer.server.clone()), None, err)) - } else { - if blocks_connection { - self.inner.backchannel.write().await.set_blocked(&writer.server); - } - Written::Sent((writer.server.clone(), true)) - } + #[cfg(not(feature = "replicas"))] + pub fn take_connection(&mut self, server: &Server) -> Option { + self.connections.take_connection(Some(server)) } /// Disconnect from all the servers, moving the in-flight messages to the internal command buffer and triggering a /// reconnection, if necessary. - pub async fn disconnect_all(&mut self) { - let commands = self.connections.disconnect_all(&self.inner).await; - self.buffer_commands(commands); - self.disconnect_replicas().await; + pub async fn disconnect_all(&mut self, inner: &RefCount) { + let commands = self.connections.disconnect_all(inner).await; + self.retry_commands(commands); + self.disconnect_replicas(inner).await; } /// Disconnect from all the servers, moving the in-flight messages to the internal command buffer and triggering a /// reconnection, if necessary. 
#[cfg(feature = "replicas")] - pub async fn disconnect_replicas(&mut self) { - if let Err(e) = self.replicas.clear_connections(&self.inner).await { - warn!("{}: Error disconnecting replicas: {:?}", self.inner.id, e); + pub async fn disconnect_replicas(&mut self, inner: &RefCount) { + if let Err(e) = self.replicas.clear_connections(inner).await { + _warn!(inner, "Error disconnecting replicas: {:?}", e); } } #[cfg(not(feature = "replicas"))] - pub async fn disconnect_replicas(&mut self) {} + pub async fn disconnect_replicas(&mut self, _: &RefCount) {} /// Add the provided commands to the retry buffer. - pub fn buffer_commands(&mut self, commands: impl IntoIterator) { + pub fn retry_commands(&mut self, commands: impl IntoIterator) { for command in commands.into_iter() { - self.buffer_command(command); + self.retry_command(command); } } /// Add the provided command to the retry buffer. - pub fn buffer_command(&mut self, command: RedisCommand) { - trace!( - "{}: Adding {} ({}) command to retry buffer.", - self.inner.id, - command.kind.to_str_debug(), - command.debug_id() - ); - self.buffer.push_back(command); + pub fn retry_command(&mut self, command: Command) { + self.retry_buffer.push_back(command); } /// Clear all the commands in the retry buffer. pub fn clear_retry_buffer(&mut self) { - trace!( - "{}: Clearing retry buffer with {} commands.", - self.inner.id, - self.buffer.len() - ); - self.buffer.clear(); + self.retry_buffer.clear(); } /// Connect to the server(s), discarding any previous connection state. 
- pub async fn connect(&mut self) -> Result<(), RedisError> { - self.disconnect_all().await; - let result = self.connections.initialize(&self.inner, &mut self.buffer).await; + pub async fn connect(&mut self, inner: &RefCount) -> Result<(), Error> { + let result = self.connections.initialize(inner, &mut self.retry_buffer).await; if result.is_ok() { #[cfg(feature = "replicas")] - self.refresh_replica_routing().await?; + self.refresh_replica_routing(inner).await?; Ok(()) } else { @@ -743,10 +285,10 @@ impl Router { /// Gracefully reset the replica routing table. #[cfg(feature = "replicas")] - pub async fn refresh_replica_routing(&mut self) -> Result<(), RedisError> { + pub async fn refresh_replica_routing(&mut self, inner: &RefCount) -> Result<(), Error> { self.replicas.clear_routing(); - if let Err(e) = self.sync_replicas().await { - if !self.inner.ignore_replica_reconnect_errors() { + if let Err(e) = self.sync_replicas(inner).await { + if !inner.ignore_replica_reconnect_errors() { return Err(e); } } @@ -757,55 +299,63 @@ impl Router { /// Sync the cached cluster state with the server via `CLUSTER SLOTS`. /// /// This will also create new connections or drop old connections as needed. 
- pub async fn sync_cluster(&mut self) -> Result<(), RedisError> { - let result = clustered::sync(&self.inner, &mut self.connections, &mut self.buffer).await; + pub async fn sync_cluster(&mut self, inner: &RefCount) -> Result<(), Error> { + let result = match self.connections { + Connections::Clustered { + connections: ref mut writers, + ref mut cache, + } => { + let result = clustered::sync(inner, writers, cache, &mut self.retry_buffer).await; - if result.is_ok() { - #[cfg(feature = "replicas")] - self.refresh_replica_routing().await?; - self.retry_buffer().await; - } + if result.is_ok() { + #[cfg(feature = "replicas")] + self.refresh_replica_routing(inner).await?; + // surface errors from the retry process, otherwise return the reconnection result + Box::pin(self.retry_buffer(inner)).await?; + } + result + }, + _ => Ok(()), + }; + + inner.backchannel.update_connection_ids(&self.connections); result } /// Rebuild the cached replica routing table based on the primary node connections. #[cfg(feature = "replicas")] - pub async fn sync_replicas(&mut self) -> Result<(), RedisError> { - debug!("{}: Syncing replicas...", self.inner.id); + pub async fn sync_replicas(&mut self, inner: &RefCount) -> Result<(), Error> { + _debug!(inner, "Syncing replicas..."); self.replicas.drop_broken_connections().await; - let old_connections = self.replicas.active_connections(); - let new_replica_map = self.connections.replica_map(&self.inner).await?; + let old_connections = self.replicas.active_connections().await; + let new_replica_map = self.connections.replica_map(inner).await?; let old_connections_idx: HashSet<_> = old_connections.iter().collect(); let new_connections_idx: HashSet<_> = new_replica_map.keys().collect(); let remove: Vec<_> = old_connections_idx.difference(&new_connections_idx).collect(); for server in remove.into_iter() { - debug!("{}: Dropping replica connection to {}", self.inner.id, server); - self.replicas.drop_writer(server).await; + _debug!(inner, "Dropping 
replica connection to {}", server); + self.replicas.drop_writer(inner, server).await; self.replicas.remove_replica(server); } for (mut replica, primary) in new_replica_map.into_iter() { - let should_use = if let Some(filter) = self.inner.connection.replica.filter.as_ref() { + let should_use = if let Some(filter) = inner.connection.replica.filter.as_ref() { filter.filter(&primary, &replica).await } else { true }; if should_use { - replicas::map_replica_tls_names(&self.inner, &primary, &mut replica); + replicas::map_replica_tls_names(inner, &primary, &mut replica); - self - .replicas - .add_connection(&self.inner, primary, replica, false) - .await?; + self.replicas.add_connection(inner, primary, replica, false).await?; } } - self - .inner + inner .server_state .write() .update_replicas(self.replicas.routing_table()); @@ -813,157 +363,161 @@ impl Router { } /// Attempt to replay all queued commands on the internal buffer without backpressure. - pub async fn retry_buffer(&mut self) { - let mut failed_commands: VecDeque<_> = VecDeque::new(); - let mut commands: VecDeque<_> = self.buffer.drain(..).collect(); + pub async fn retry_buffer(&mut self, inner: &RefCount) -> Result<(), Error> { #[cfg(feature = "replicas")] - commands.extend(self.replicas.take_retry_buffer()); + { + let commands = self.replicas.take_retry_buffer(); + self.retry_buffer.extend(commands); + } - while let Some(mut command) = commands.pop_front() { + while let Some(command) = self.retry_buffer.pop_front() { if client_utils::read_bool_atomic(&command.timed_out) { - debug!( - "{}: Ignore retrying timed out command: {}", - self.inner.id, + _debug!( + inner, + "Ignore retrying timed out command: {}", command.kind.to_str_debug() ); continue; } - if let Err(e) = command.decr_check_attempted() { - command.finish(&self.inner, Err(e)); - continue; - } - command.skip_backpressure = true; - trace!( - "{}: Retry `{}` ({}) command, attempts left: {}", - self.inner.id, + _trace!( + inner, + "Retry `{}` ({}) 
command, attempts remaining: {}", command.kind.to_str_debug(), command.debug_id(), command.attempts_remaining, ); + if let Err(err) = Box::pin(commands::write_command(inner, self, command)).await { + _debug!(inner, "Error retrying command: {:?}", err); + break; + } + } - let result = if command.use_replica { - self.write_replica(command, true).await - } else { - self.write(command, true).await - }; + let _ = self.flush().await; + Ok(()) + } - match result { - Written::Disconnected((server, command, error)) => { - if let Some(command) = command { - failed_commands.push_back(command); - } + /// Wait and read frames until there are no in-flight frames on primary connections. + pub async fn drain_all(&mut self, inner: &RefCount) -> Result<(), Error> { + let inner = inner.clone(); + _trace!(inner, "Draining all connections..."); + + let primary_ft = async { + match self.connections { + Connections::Clustered { + connections: ref mut writers, + .. + } => { + // drain all connections even if one of them breaks out early with an error + let _ = join_all(writers.iter_mut().map(|(_, conn)| conn.drain(&inner))) + .await + .into_iter() + .collect::, Error>>()?; - debug!( - "{}: Disconnect while retrying after write error: {:?}", - &self.inner.id, error - ); - self.connections.disconnect(&self.inner, server.as_ref()).await; - utils::defer_reconnect(&self.inner); - break; - }, - Written::NotFound(command) => { - failed_commands.push_back(command); - - warn!( - "{}: Disconnect and re-sync cluster state after routing error while retrying commands.", - self.inner.id - ); - self.disconnect_all().await; - utils::defer_reconnect(&self.inner); - break; + Ok(()) }, - Written::Error((error, command)) => { - warn!("{}: Error replaying command: {:?}", self.inner.id, error); - if let Some(command) = command { - command.finish(&self.inner, Err(error)); - } - self.disconnect_all().await; - utils::defer_reconnect(&self.inner); - break; + Connections::Centralized { + connection: ref mut writer, 
+ } + | Connections::Sentinel { + connection: ref mut writer, + } => match writer { + Some(ref mut conn) => conn.drain(&inner).await, + None => Ok(()), }, - _ => {}, } - } - - failed_commands.extend(commands); - self.buffer_commands(failed_commands); - } + }; - /// Check each connection for pending frames that have not been flushed, and flush the connection if needed. - #[cfg(feature = "replicas")] - pub async fn check_and_flush(&mut self) -> Result<(), RedisError> { - if let Err(e) = self.replicas.check_and_flush().await { - warn!("{}: Error flushing replica connections: {:?}", self.inner.id, e); - } - self.connections.check_and_flush(&self.inner).await + #[cfg(feature = "replicas")] + return try_join(primary_ft, self.replicas.drain(&inner)).await.map(|_| ()); + #[cfg(not(feature = "replicas"))] + primary_ft.await } - #[cfg(not(feature = "replicas"))] - pub async fn check_and_flush(&mut self) -> Result<(), RedisError> { - self.connections.check_and_flush(&self.inner).await + pub async fn flush(&mut self) -> Result<(), Error> { + self.connections.flush().await?; + #[cfg(feature = "replicas")] + self.replicas.flush().await?; + Ok(()) } - /// Returns whether the provided `server` owns the provided `slot`. - pub fn cluster_node_owns_slot(&self, slot: u16, server: &Server) -> bool { + pub async fn has_healthy_centralized_connection(&mut self) -> bool { match self.connections { - Connections::Clustered { ref cache, .. } => cache.get_server(slot).map(|node| node == server).unwrap_or(false), + Connections::Centralized { + connection: ref mut writer, + } + | Connections::Sentinel { + connection: ref mut writer, + } => { + if let Some(writer) = writer { + writer.peek_reader_errors().await.is_none() + } else { + false + } + }, _ => false, } } - /// Modify connection state according to the cluster redirection error. 
- /// - /// * Synchronizes the cached cluster state in response to MOVED - /// * Connects and sends `ASKING` to the provided server in response to ASKED - #[cfg(feature = "transactions")] - pub async fn cluster_redirection( + /// Try to read from all sockets concurrently. + #[cfg(feature = "replicas")] + pub async fn select_read( &mut self, - kind: &ClusterErrorKind, - slot: u16, - server: &Server, - ) -> Result<(), RedisError> { - debug!( - "{}: Handling cluster redirect {:?} {} {}", - &self.inner.id, kind, slot, server - ); - - if *kind == ClusterErrorKind::Moved { - let should_sync = self - .inner - .with_cluster_state(|state| Ok(state.get_server(slot).map(|owner| server != owner).unwrap_or(true))) - .unwrap_or(true); - - if should_sync { - self.sync_cluster().await?; - } - } else if *kind == ClusterErrorKind::Ask { - if !self.connections.has_server_connection(server) { - self.connections.add_connection(&self.inner, server).await?; - self - .inner - .backchannel - .write() - .await - .update_connection_ids(&self.connections); + inner: &RefCount, + ) -> Vec<(Server, Option>)> { + match self.connections { + Connections::Centralized { + connection: ref mut writer, } + | Connections::Sentinel { + connection: ref mut writer, + } => { + if let Some(writer) = writer { + ReadFuture::new(inner, writer, &mut self.replicas.connections).await + } else { + Vec::new() + } + }, + Connections::Clustered { + connections: ref mut writers, + .. 
+ } => ReadAllFuture::new(inner, writers, &mut self.replicas.connections).await, + } + } - // can't use request_response since there may be pipelined commands ahead of this - let (tx, rx) = oneshot_channel(); - let mut command = RedisCommand::new_asking(slot); - command.response = ResponseKind::Respond(Some(tx)); - command.skip_backpressure = true; - - match self.write_direct(command, server).await { - Written::Error((error, _)) => return Err(error), - Written::Disconnected((_, _, error)) => return Err(error), - Written::NotFound(_) => return Err(RedisError::new(RedisErrorKind::Cluster, "Connection not found.")), - _ => {}, - }; - - let _ = client_utils::timeout(rx, self.inner.internal_command_timeout()).await??; + /// Try to read from all sockets concurrently. + #[cfg(not(feature = "replicas"))] + pub async fn select_read( + &mut self, + inner: &RefCount, + ) -> Vec<(Server, Option>)> { + match self.connections { + Connections::Centralized { + connection: ref mut writer, + } + | Connections::Sentinel { + connection: ref mut writer, + } => { + if let Some(writer) = writer { + ReadFuture::new(inner, writer).await + } else { + Vec::new() + } + }, + Connections::Clustered { + connections: ref mut writers, + .. 
+ } => ReadAllFuture::new(inner, writers).await, } + } - Ok(()) + #[cfg(feature = "replicas")] + pub fn is_replica(&self, server: &Server) -> bool { + self.replicas.connections.contains_key(server) + } + + #[cfg(not(feature = "replicas"))] + pub fn is_replica(&self, _: &Server) -> bool { + false } } diff --git a/src/router/replicas.rs b/src/router/replicas.rs index 9c015913..a1c7278e 100644 --- a/src/router/replicas.rs +++ b/src/router/replicas.rs @@ -1,27 +1,21 @@ -#[cfg(all(feature = "replicas", any(feature = "enable-native-tls", feature = "enable-rustls")))] -use crate::types::TlsHostMapping; -#[cfg(feature = "replicas")] use crate::{ - error::{RedisError, RedisErrorKind}, - modules::inner::RedisClientInner, - protocol::{ - command::RedisCommand, - connection, - connection::{CommandBuffer, RedisWriter}, - }, - router::{centralized, clustered, utils, Written}, + error::Error, + modules::inner::ClientInner, + protocol::{command::Command, connection, connection::Connection}, runtime::RefCount, - types::Server, + types::config::Server, }; -#[cfg(feature = "replicas")] +use futures::future::join_all; use std::{ collections::{HashMap, VecDeque}, fmt, fmt::Formatter, }; +#[cfg(any(feature = "enable-native-tls", feature = "enable-rustls"))] +use crate::types::config::TlsHostMapping; + /// An interface used to filter the list of available replica nodes. -#[cfg(feature = "replicas")] #[cfg_attr(docsrs, doc(cfg(feature = "replicas")))] #[async_trait] pub trait ReplicaFilter: Send + Sync + 'static { @@ -33,7 +27,11 @@ pub trait ReplicaFilter: Send + Sync + 'static { } /// Configuration options for replica node connections. -#[cfg(feature = "replicas")] +/// +/// When connecting to a replica the client will use the parameters specified in the +/// [ReconnectPolicy](crate::types::config::ReconnectPolicy). +/// +/// Currently only clustered replicas are supported. 
#[cfg_attr(docsrs, doc(cfg(feature = "replicas")))] #[derive(Clone)] pub struct ReplicaConfig { @@ -47,51 +45,42 @@ pub struct ReplicaConfig { pub filter: Option>, /// Whether the client should ignore errors from replicas that occur when the max reconnection count is reached. /// + /// This implies `primary_fallback: true`. + /// /// Default: `true` pub ignore_reconnection_errors: bool, - /// The number of times a command can fail with a replica connection error before being sent to a primary node. - /// - /// Default: `0` (unlimited) - pub connection_error_count: u32, /// Whether the client should use the associated primary node if no replica exists that can serve a command. /// /// Default: `true` pub primary_fallback: bool, } -#[cfg(feature = "replicas")] impl fmt::Debug for ReplicaConfig { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("ReplicaConfig") .field("lazy_connections", &self.lazy_connections) .field("ignore_reconnection_errors", &self.ignore_reconnection_errors) - .field("connection_error_count", &self.connection_error_count) .field("primary_fallback", &self.primary_fallback) .finish() } } -#[cfg(feature = "replicas")] impl PartialEq for ReplicaConfig { fn eq(&self, other: &Self) -> bool { self.lazy_connections == other.lazy_connections && self.ignore_reconnection_errors == other.ignore_reconnection_errors - && self.connection_error_count == other.connection_error_count && self.primary_fallback == other.primary_fallback } } -#[cfg(feature = "replicas")] impl Eq for ReplicaConfig {} -#[cfg(feature = "replicas")] impl Default for ReplicaConfig { fn default() -> Self { ReplicaConfig { lazy_connections: true, filter: None, ignore_reconnection_errors: true, - connection_error_count: 0, primary_fallback: true, } } @@ -100,14 +89,12 @@ impl Default for ReplicaConfig { /// A container for round-robin routing among replica nodes. 
// This implementation optimizes for next() at the cost of add() and remove() #[derive(Clone, Debug, PartialEq, Eq, Default)] -#[cfg(feature = "replicas")] #[cfg_attr(docsrs, doc(cfg(feature = "replicas")))] pub struct ReplicaRouter { counter: usize, servers: Vec, } -#[cfg(feature = "replicas")] impl ReplicaRouter { /// Read the server that should receive the next command. pub fn next(&mut self) -> Option<&Server> { @@ -139,7 +126,6 @@ impl ReplicaRouter { } /// A container for round-robin routing to replica servers. -#[cfg(feature = "replicas")] #[cfg_attr(docsrs, doc(cfg(feature = "replicas")))] #[derive(Clone, Debug, Eq, PartialEq, Default)] pub struct ReplicaSet { @@ -147,8 +133,6 @@ pub struct ReplicaSet { servers: HashMap, } -#[cfg(feature = "replicas")] -#[allow(dead_code)] impl ReplicaSet { /// Create a new empty replica set. pub fn new() -> ReplicaSet { @@ -220,18 +204,6 @@ impl ReplicaSet { out } - /// Read the set of all known replica nodes for all primary nodes. - pub fn all_replicas(&self) -> Vec { - let mut out = Vec::with_capacity(self.servers.len()); - for (_, replicas) in self.servers.iter() { - for replica in replicas.iter() { - out.push(replica.clone()); - } - } - - out - } - /// Clear the routing table. pub fn clear(&mut self) { self.servers.clear(); @@ -241,9 +213,9 @@ impl ReplicaSet { /// A struct for routing commands to replica nodes. #[cfg(feature = "replicas")] pub struct Replicas { - pub(crate) writers: HashMap, - routing: ReplicaSet, - buffer: VecDeque, + pub connections: HashMap, + pub routing: ReplicaSet, + pub buffer: VecDeque, } #[cfg(feature = "replicas")] @@ -251,16 +223,16 @@ pub struct Replicas { impl Replicas { pub fn new() -> Replicas { Replicas { - writers: HashMap::new(), - routing: ReplicaSet::new(), - buffer: VecDeque::new(), + connections: HashMap::new(), + routing: ReplicaSet::new(), + buffer: VecDeque::new(), } } /// Sync the connection map in place based on the cached routing table. 
- pub async fn sync_connections(&mut self, inner: &RefCount) -> Result<(), RedisError> { - for (_, writer) in self.writers.drain() { - let commands = writer.graceful_close().await; + pub async fn sync_connections(&mut self, inner: &RefCount) -> Result<(), Error> { + for (_, mut writer) in self.connections.drain() { + let commands = writer.close().await; self.buffer.extend(commands); } @@ -272,7 +244,7 @@ impl Replicas { } /// Drop all connections and clear the cached routing table. - pub async fn clear_connections(&mut self, inner: &RefCount) -> Result<(), RedisError> { + pub async fn clear_connections(&mut self, inner: &RefCount) -> Result<(), Error> { self.routing.clear(); self.sync_connections(inner).await } @@ -285,11 +257,11 @@ impl Replicas { /// Connect to the replica and add it to the cached routing table. pub async fn add_connection( &mut self, - inner: &RefCount, + inner: &RefCount, primary: Server, replica: Server, force: bool, - ) -> Result<(), RedisError> { + ) -> Result<(), Error> { _debug!( inner, "Adding replica connection {} (replica) -> {} (primary)", @@ -301,14 +273,18 @@ impl Replicas { let mut transport = connection::create(inner, &replica, None).await?; transport.setup(inner, None).await?; - let (_, writer) = if inner.config.server.is_clustered() { + if inner.config.server.is_clustered() { transport.readonly(inner, None).await?; - connection::split(inner, transport, true, clustered::spawn_reader_task)? - } else { - connection::split(inner, transport, true, centralized::spawn_reader_task)? }; - self.writers.insert(replica.clone(), writer); + if let Some(id) = transport.id { + inner + .backchannel + .connection_ids + .lock() + .insert(transport.server.clone(), id); + } + self.connections.insert(replica.clone(), transport.into_pipelined(true)); } self.routing.add(primary, replica); @@ -316,10 +292,10 @@ impl Replicas { } /// Drop the socket associated with the provided server. 
- pub async fn drop_writer(&mut self, replica: &Server) { - if let Some(writer) = self.writers.remove(replica) { - let commands = writer.graceful_close().await; - self.buffer.extend(commands); + pub async fn drop_writer(&mut self, inner: &RefCount, replica: &Server) { + if let Some(mut writer) = self.connections.remove(replica) { + self.buffer.extend(writer.close().await); + inner.backchannel.connection_ids.lock().remove(replica); } } @@ -331,18 +307,18 @@ impl Replicas { /// Close the replica connection and optionally remove the replica from the routing table. pub async fn remove_connection( &mut self, - inner: &RefCount, + inner: &RefCount, primary: &Server, replica: &Server, keep_routable: bool, - ) -> Result<(), RedisError> { + ) -> Result<(), Error> { _debug!( inner, "Removing replica connection {} (replica) -> {} (primary)", replica, primary ); - self.drop_writer(replica).await; + self.drop_writer(inner, replica).await; if !keep_routable { self.routing.remove(primary, replica); @@ -351,8 +327,8 @@ impl Replicas { } /// Check and flush all the sockets managed by the replica routing state. - pub async fn check_and_flush(&mut self) -> Result<(), RedisError> { - for (_, writer) in self.writers.iter_mut() { + pub async fn flush(&mut self) -> Result<(), Error> { + for (_, writer) in self.connections.iter_mut() { writer.flush().await?; } @@ -360,21 +336,22 @@ impl Replicas { } /// Whether a working connection exists to any replica for the provided primary node. - pub fn has_replica_connection(&self, primary: &Server) -> bool { + pub async fn has_replica_connection(&mut self, primary: &Server) -> bool { for replica in self.routing.replicas(primary) { - if self.has_connection(replica) { - return true; + if let Some(replica) = self.connections.get_mut(replica) { + if replica.peek_reader_errors().await.is_some() { + continue; + } else { + return true; + } + } else { + continue; } } false } - /// Whether a connection exists to the provided replica node. 
- pub fn has_connection(&self, replica: &Server) -> bool { - self.writers.get(replica).map(|w| w.is_working()).unwrap_or(false) - } - /// Return a map of `replica` -> `primary` server identifiers. pub fn routing_table(&self) -> HashMap { self.routing.to_map() @@ -382,157 +359,52 @@ impl Replicas { /// Check the active connections and drop any without a working reader task. pub async fn drop_broken_connections(&mut self) { - let mut new_writers = HashMap::with_capacity(self.writers.len()); - for (server, writer) in self.writers.drain() { - if writer.is_working() { - new_writers.insert(server, writer); - } else { - let commands = writer.graceful_close().await; - self.buffer.extend(commands); + let mut new_writers = HashMap::with_capacity(self.connections.len()); + for (server, mut writer) in self.connections.drain() { + if writer.peek_reader_errors().await.is_some() { + self.buffer.extend(writer.close().await); self.routing.remove_replica(&server); + } else { + new_writers.insert(server, writer); } } - self.writers = new_writers; + self.connections = new_writers; } /// Read the set of all active connections. - pub fn active_connections(&self) -> Vec { - self - .writers - .iter() - .filter_map(|(server, writer)| { - if writer.is_working() { - Some(server.clone()) - } else { - None - } - }) - .collect() - } - - /// Send a command to one of the replicas associated with the provided primary server. 
- pub async fn write( - &mut self, - inner: &RefCount, - primary: &Server, - mut command: RedisCommand, - force_flush: bool, - ) -> Written { - let replica = match command.cluster_node { - Some(ref server) => server.clone(), - None => match self.routing.next_replica(primary) { - Some(replica) => replica.clone(), - None => { - // we do not know of any replica node associated with the primary node - return if inner.connection.replica.primary_fallback { - Written::Fallback(command) - } else { - command.finish( - inner, - Err(RedisError::new(RedisErrorKind::Replica, "Missing replica node.")), - ); - Written::Ignore - }; - }, - }, - }; - - _trace!( - inner, - "Found replica {} (primary: {}) for {} ({})", - replica, - primary, - command.kind.to_str_debug(), - command.debug_id() - ); - - let writer = match self.writers.get_mut(&replica) { - Some(writer) => writer, - None => { - // these errors indicate that we know a replica node should exist, but we are not connected or cannot - // connect to it. in this case we want to hide the error, trigger a reconnect, and retry the command later. - if inner.connection.replica.lazy_connections { - _debug!(inner, "Lazily adding {} replica connection", replica); - if let Err(e) = self.add_connection(inner, primary.clone(), replica.clone(), true).await { - // we tried connecting once but failed. 
- self.routing.remove_replica(&replica); - // since we didn't get to actually send the command - command.attempts_remaining += 1; - return Written::Disconnected((Some(replica.clone()), Some(command), e)); - } - - match self.writers.get_mut(&replica) { - Some(writer) => writer, - None => { - self.routing.remove_replica(&replica); - // the connection should be here if self.add_connection succeeded - return Written::Disconnected(( - Some(replica.clone()), - Some(command), - RedisError::new(RedisErrorKind::Replica, "Missing connection."), - )); - }, - } - } else { - // we don't have a connection to the replica and we're not configured to lazily create new ones - return Written::NotFound(command); - } - }, - }; - let (frame, should_flush) = match utils::prepare_command(inner, &writer.counters, &mut command) { - Ok((frame, should_flush)) => (frame, should_flush || force_flush), - Err(e) => { - _warn!(inner, "Frame encoding error for {}", command.kind.to_str_debug()); - // do not retry commands that trigger frame encoding errors - command.finish(inner, Err(e)); - return Written::Ignore; - }, - }; - - let blocks_connection = command.blocks_connection(); - _debug!( - inner, - "Sending {} ({}) to replica {}", - command.kind.to_str_debug(), - command.debug_id(), - replica - ); - command.write_attempts += 1; - - if !writer.is_working() { - let error = RedisError::new(RedisErrorKind::IO, "Connection closed."); - - _debug!( - inner, - "Error sending replica command {}: {:?}", - command.kind.to_str_debug(), - error - ); - self.routing.remove_replica(&writer.server); - return Written::Disconnected((Some(writer.server.clone()), Some(command), error)); - } - - writer.push_command(inner, command); - if let Err(err) = writer.write_frame(frame, should_flush, false).await { - self.routing.remove_replica(&writer.server); - Written::Disconnected((Some(writer.server.clone()), None, err)) - } else { - if blocks_connection { - inner.backchannel.write().await.set_blocked(&writer.server); + 
pub async fn active_connections(&mut self) -> Vec { + join_all(self.connections.iter_mut().map(|(server, conn)| async move { + if conn.peek_reader_errors().await.is_some() { + None + } else { + Some(server.clone()) } - Written::Sent((writer.server.clone(), should_flush)) - } + })) + .await + .into_iter() + .flatten() + .collect() } /// Take the commands stored for retry later. - pub fn take_retry_buffer(&mut self) -> CommandBuffer { + pub fn take_retry_buffer(&mut self) -> VecDeque { self.buffer.drain(..).collect() } + + pub async fn drain(&mut self, inner: &RefCount) -> Result<(), Error> { + // let inner = inner.clone(); + let _ = join_all(self.connections.iter_mut().map(|(_, conn)| conn.drain(inner))) + .await + .into_iter() + .collect::, Error>>()?; + + Ok(()) + } } -#[cfg(all(feature = "replicas", any(feature = "enable-native-tls", feature = "enable-rustls")))] -pub fn map_replica_tls_names(inner: &RefCount, primary: &Server, replica: &mut Server) { +#[cfg(any(feature = "enable-native-tls", feature = "enable-rustls"))] +pub fn map_replica_tls_names(inner: &RefCount, primary: &Server, replica: &mut Server) { let policy = match inner.config.tls { Some(ref config) => &config.hostnames, None => { @@ -548,8 +420,5 @@ pub fn map_replica_tls_names(inner: &RefCount, primary: &Serve replica.set_tls_server_name(policy, &primary.host); } -#[cfg(all( - feature = "replicas", - not(any(feature = "enable-native-tls", feature = "enable-rustls")) -))] -pub fn map_replica_tls_names(_: &RefCount, _: &Server, _: &mut Server) {} +#[cfg(not(any(feature = "enable-native-tls", feature = "enable-rustls")))] +pub fn map_replica_tls_names(_: &RefCount, _: &Server, _: &mut Server) {} diff --git a/src/router/responses.rs b/src/router/responses.rs index 1cde3b52..6fc7e938 100644 --- a/src/router/responses.rs +++ b/src/router/responses.rs @@ -1,10 +1,12 @@ +#[cfg(feature = "i-tracking")] +use crate::types::client::Invalidation; use crate::{ - error::{RedisError, RedisErrorKind}, - 
modules::inner::RedisClientInner, - protocol::{command::RedisCommand, types::Server, utils as protocol_utils, utils::pretty_error}, + error::{Error, ErrorKind}, + modules::inner::ClientInner, + protocol::{types::Server, utils as protocol_utils, utils::pretty_error}, runtime::RefCount, trace, - types::{ClientState, KeyspaceEvent, Message, RedisKey, RedisValue}, + types::{ClientState, Key, KeyspaceEvent, Message, Value}, utils, }; use redis_protocol::{ @@ -13,15 +15,12 @@ use redis_protocol::{ }; use std::str; -#[cfg(feature = "i-tracking")] -use crate::types::Invalidation; - const KEYSPACE_PREFIX: &str = "__keyspace@"; const KEYEVENT_PREFIX: &str = "__keyevent@"; #[cfg(feature = "i-tracking")] const INVALIDATION_CHANNEL: &str = "__redis__:invalidate"; -fn parse_keyspace_notification(channel: &str, message: &RedisValue) -> Option { +fn parse_keyspace_notification(channel: &str, message: &Value) -> Option { if channel.starts_with(KEYEVENT_PREFIX) { let parts: Vec<&str> = channel.splitn(2, '@').collect(); if parts.len() < 2 { @@ -35,7 +34,7 @@ fn parse_keyspace_notification(channel: &str, message: &RedisValue) -> Option().ok()?; let operation = suffix[1].to_owned(); - let key: RedisKey = message.clone().try_into().ok()?; + let key: Key = message.clone().try_into().ok()?; Some(KeyspaceEvent { db, key, operation }) } else if channel.starts_with(KEYSPACE_PREFIX) { @@ -50,7 +49,7 @@ fn parse_keyspace_notification(channel: &str, message: &RedisValue) -> Option().ok()?; - let key: RedisKey = suffix[1].to_owned().into(); + let key: Key = suffix[1].to_owned().into(); let operation = message.as_string()?; Some(KeyspaceEvent { db, key, operation }) @@ -60,7 +59,7 @@ fn parse_keyspace_notification(channel: &str, message: &RedisValue) -> Option, message: Message, server: &Server) { +fn broadcast_pubsub_invalidation(inner: &RefCount, message: Message, server: &Server) { if let Some(invalidation) = Invalidation::from_message(message, server) { 
inner.notifications.broadcast_invalidation(invalidation); } else { @@ -72,7 +71,7 @@ fn broadcast_pubsub_invalidation(inner: &RefCount, message: Me } #[cfg(not(feature = "i-tracking"))] -fn broadcast_pubsub_invalidation(_: &RefCount, _: Message, _: &Server) {} +fn broadcast_pubsub_invalidation(_: &RefCount, _: Message, _: &Server) {} #[cfg(feature = "i-tracking")] fn is_pubsub_invalidation(message: &Message) -> bool { @@ -85,7 +84,7 @@ fn is_pubsub_invalidation(_: &Message) -> bool { } #[cfg(feature = "i-tracking")] -fn broadcast_resp3_invalidation(inner: &RefCount, server: &Server, frame: Resp3Frame) { +fn broadcast_resp3_invalidation(inner: &RefCount, server: &Server, frame: Resp3Frame) { if let Resp3Frame::Push { mut data, .. } = frame { if data.len() != 2 { return; @@ -106,7 +105,7 @@ fn broadcast_resp3_invalidation(inner: &RefCount, server: &Ser } #[cfg(not(feature = "i-tracking"))] -fn broadcast_resp3_invalidation(_: &RefCount, _: &Server, _: Resp3Frame) {} +fn broadcast_resp3_invalidation(_: &RefCount, _: &Server, _: Resp3Frame) {} #[cfg(feature = "i-tracking")] fn is_resp3_invalidation(frame: &Resp3Frame) -> bool { @@ -164,11 +163,7 @@ fn is_resp3_invalidation(_: &Resp3Frame) -> bool { /// Check if the frame is part of a pubsub message, and if so route it to any listeners. /// /// If not then return it to the caller for further processing. 
-pub fn check_pubsub_message( - inner: &RefCount, - server: &Server, - frame: Resp3Frame, -) -> Option { +pub fn check_pubsub_message(inner: &RefCount, server: &Server, frame: Resp3Frame) -> Option { if is_subscription_response(&frame) { _debug!(inner, "Dropping unused subscription response."); return None; @@ -216,21 +211,13 @@ pub fn check_pubsub_message( None } -// TODO cleanup and rename -// this is called by the reader task after a blocking command finishes in order to mark the connection as unblocked -pub async fn check_and_set_unblocked_flag(inner: &RefCount, command: &RedisCommand) { - if command.blocks_connection() { - inner.backchannel.write().await.set_unblocked(); - } -} - /// Parse the response frame to see if it's an auth error. -fn parse_redis_auth_error(frame: &Resp3Frame) -> Option { +fn parse_auth_error(frame: &Resp3Frame) -> Option { if matches!(frame.kind(), FrameKind::SimpleError | FrameKind::BlobError) { match protocol_utils::frame_to_results(frame.clone()) { Ok(_) => None, Err(e) => match e.kind() { - RedisErrorKind::Auth => Some(e), + ErrorKind::Auth => Some(e), _ => None, }, } @@ -240,13 +227,17 @@ fn parse_redis_auth_error(frame: &Resp3Frame) -> Option { } #[cfg(feature = "custom-reconnect-errors")] -fn check_global_reconnect_errors(inner: &RefCount, frame: &Resp3Frame) -> Option { +fn check_global_reconnect_errors( + inner: &RefCount, + server: &Server, + frame: &Resp3Frame, +) -> Option { if let Resp3Frame::SimpleError { ref data, .. 
} = frame { for prefix in inner.connection.reconnect_errors.iter() { if data.starts_with(prefix.to_str()) { _warn!(inner, "Found reconnection error: {}", data); let error = protocol_utils::pretty_error(data); - inner.notifications.broadcast_error(error.clone()); + inner.notifications.broadcast_error(error.clone(), Some(server.clone())); return Some(error); } } @@ -258,7 +249,7 @@ fn check_global_reconnect_errors(inner: &RefCount, frame: &Res } #[cfg(not(feature = "custom-reconnect-errors"))] -fn check_global_reconnect_errors(_: &RefCount, _: &Resp3Frame) -> Option { +fn check_global_reconnect_errors(_: &RefCount, _: &Server, _: &Resp3Frame) -> Option { None } @@ -287,10 +278,10 @@ fn is_clusterdown_error(frame: &Resp3Frame) -> Option<&str> { } } -/// Check for special errors configured by the caller to initiate a reconnection process. -pub fn check_special_errors(inner: &RefCount, frame: &Resp3Frame) -> Option { +/// Check for fatal errors configured by the caller to initiate a reconnection process. +pub fn check_fatal_errors(inner: &RefCount, server: &Server, frame: &Resp3Frame) -> Option { if inner.connection.reconnect_on_auth_error { - if let Some(auth_error) = parse_redis_auth_error(frame) { + if let Some(auth_error) = parse_auth_error(frame) { return Some(auth_error); } } @@ -298,38 +289,42 @@ pub fn check_special_errors(inner: &RefCount, frame: &Resp3Fra return Some(pretty_error(error)); } - check_global_reconnect_errors(inner, frame) + check_global_reconnect_errors(inner, server, frame) +} + +/// Check for special errors, pubsub messages, or other special response frames. +/// +/// The frame is returned to the caller for further processing if necessary. 
+pub fn preprocess_frame( + inner: &RefCount, + server: &Server, + frame: Resp3Frame, +) -> Result, Error> { + if let Some(error) = check_fatal_errors(inner, server, &frame) { + Err(error) + } else { + Ok(check_pubsub_message(inner, server, frame)) + } } /// Handle an error in the reader task that should end the connection. -pub fn broadcast_reader_error(inner: &RefCount, server: &Server, error: Option) { +pub fn broadcast_reader_error(inner: &RefCount, server: &Server, error: Option) { _warn!(inner, "Ending reader task from {} due to {:?}", server, error); - if inner.should_reconnect() { - inner.send_reconnect(Some(server.clone()), false, None); - } if utils::read_locked(&inner.state) != ClientState::Disconnecting { inner .notifications - .broadcast_error(error.unwrap_or(RedisError::new_canceled())); + .broadcast_error(error.unwrap_or(Error::new_canceled()), Some(server.clone())); } } -#[cfg(not(feature = "replicas"))] -pub fn broadcast_replica_error(inner: &RefCount, server: &Server, error: Option) { - broadcast_reader_error(inner, server, error); -} - #[cfg(feature = "replicas")] -pub fn broadcast_replica_error(inner: &RefCount, server: &Server, error: Option) { +pub fn broadcast_replica_error(inner: &RefCount, server: &Server, error: Option) { _warn!(inner, "Ending replica reader task from {} due to {:?}", server, error); - if inner.should_reconnect() { - inner.send_replica_reconnect(server); - } if utils::read_locked(&inner.state) != ClientState::Disconnecting { inner .notifications - .broadcast_error(error.unwrap_or(RedisError::new_canceled())); + .broadcast_error(error.unwrap_or(Error::new_canceled()), Some(server.clone())); } } diff --git a/src/router/sentinel.rs b/src/router/sentinel.rs index 4be44292..60170513 100644 --- a/src/router/sentinel.rs +++ b/src/router/sentinel.rs @@ -1,16 +1,18 @@ #![allow(dead_code)] - use crate::{ - error::{RedisError, RedisErrorKind}, - modules::inner::RedisClientInner, + error::{Error, ErrorKind}, + 
modules::inner::ClientInner, protocol::{ - command::{RedisCommand, RedisCommandKind}, - connection::{self, RedisTransport, RedisWriter}, + command::{Command, CommandKind}, + connection::{self, Connection, ExclusiveConnection}, utils as protocol_utils, }, - router::{centralized, Connections}, + router::connections::Connections, runtime::RefCount, - types::{RedisValue, Server, ServerConfig}, + types::{ + config::{Server, ServerConfig}, + Value, + }, utils, }; use bytes_utils::Str; @@ -38,17 +40,14 @@ macro_rules! stry ( match $expr { Ok(r) => r, Err(mut e) => { - e.change_kind(RedisErrorKind::Sentinel); + e.change_kind(ErrorKind::Sentinel); return Err(e); } } } ); -fn parse_sentinel_nodes_response( - inner: &RefCount, - value: RedisValue, -) -> Result, RedisError> { +fn parse_sentinel_nodes_response(inner: &RefCount, value: Value) -> Result, Error> { let result_maps: Vec> = stry!(value.convert()); let mut out = Vec::with_capacity(result_maps.len()); @@ -57,8 +56,8 @@ fn parse_sentinel_nodes_response( Some(ip) => ip, None => { _warn!(inner, "Failed to read IP for sentinel node."); - return Err(RedisError::new( - RedisErrorKind::Sentinel, + return Err(Error::new( + ErrorKind::Sentinel, "Failed to read sentinel node IP address.", )); }, @@ -67,10 +66,7 @@ fn parse_sentinel_nodes_response( Some(port) => port.parse::()?, None => { _warn!(inner, "Failed to read port for sentinel node."); - return Err(RedisError::new( - RedisErrorKind::Sentinel, - "Failed to read sentinel node port.", - )); + return Err(Error::new(ErrorKind::Sentinel, "Failed to read sentinel node port.")); }, }; @@ -94,43 +90,37 @@ fn has_different_sentinel_nodes(old: &[(String, u16)], new: &[(String, u16)]) -> } #[cfg(feature = "sentinel-auth")] -fn read_sentinel_auth(inner: &RefCount) -> Result<(Option, Option), RedisError> { +fn read_sentinel_auth(inner: &RefCount) -> Result<(Option, Option), Error> { match inner.config.server { ServerConfig::Sentinel { ref username, ref password, .. 
} => Ok((username.clone(), password.clone())), - _ => Err(RedisError::new( - RedisErrorKind::Config, - "Expected sentinel server configuration.", - )), + _ => Err(Error::new(ErrorKind::Config, "Expected sentinel server configuration.")), } } #[cfg(not(feature = "sentinel-auth"))] -fn read_sentinel_auth(inner: &RefCount) -> Result<(Option, Option), RedisError> { +fn read_sentinel_auth(inner: &RefCount) -> Result<(Option, Option), Error> { Ok((inner.config.username.clone(), inner.config.password.clone())) } -fn read_sentinel_hosts(inner: &RefCount) -> Result, RedisError> { +fn read_sentinel_hosts(inner: &RefCount) -> Result, Error> { inner .server_state .read() .kind .read_sentinel_nodes(&inner.config.server) - .ok_or(RedisError::new( - RedisErrorKind::Sentinel, - "Failed to read cached sentinel nodes.", - )) + .ok_or(Error::new(ErrorKind::Sentinel, "Failed to read cached sentinel nodes.")) } /// Read the `(host, port)` tuples for the known sentinel nodes, and the credentials to use when connecting. #[cfg(feature = "credential-provider")] async fn read_sentinel_credentials( - inner: &RefCount, + inner: &RefCount, server: &Server, -) -> Result<(Option, Option), RedisError> { +) -> Result<(Option, Option), Error> { let (username, password) = if let Some(ref provider) = inner.config.credential_provider { provider.fetch(Some(server)).await? } else { @@ -142,23 +132,20 @@ async fn read_sentinel_credentials( #[cfg(not(feature = "credential-provider"))] async fn read_sentinel_credentials( - inner: &RefCount, + inner: &RefCount, _: &Server, -) -> Result<(Option, Option), RedisError> { +) -> Result<(Option, Option), Error> { read_sentinel_auth(inner) } /// Read the set of sentinel nodes via `SENTINEL sentinels`. 
async fn read_sentinels( - inner: &RefCount, - sentinel: &mut RedisTransport, -) -> Result, RedisError> { + inner: &RefCount, + sentinel: &mut ExclusiveConnection, +) -> Result, Error> { let service_name = read_service_name(inner)?; - let command = RedisCommand::new(RedisCommandKind::Sentinel, vec![ - static_val!(SENTINELS), - service_name.into(), - ]); + let command = Command::new(CommandKind::Sentinel, vec![static_val!(SENTINELS), service_name.into()]); let frame = sentinel.request_response(command, false).await?; let response = stry!(protocol_utils::frame_to_results(frame)); _trace!(inner, "Read sentinel `sentinels` response: {:?}", response); @@ -172,7 +159,7 @@ async fn read_sentinels( } /// Connect to any of the sentinel nodes provided on the associated `RedisConfig`. -async fn connect_to_sentinel(inner: &RefCount) -> Result { +async fn connect_to_sentinel(inner: &RefCount) -> Result { let hosts = read_sentinel_hosts(inner)?; for server in hosts.into_iter() { @@ -191,30 +178,27 @@ async fn connect_to_sentinel(inner: &RefCount) -> Result) -> Result { +fn read_service_name(inner: &RefCount) -> Result { match inner.config.server { ServerConfig::Sentinel { ref service_name, .. } => Ok(service_name.to_owned()), - _ => Err(RedisError::new( - RedisErrorKind::Sentinel, - "Missing sentinel service name.", - )), + _ => Err(Error::new(ErrorKind::Sentinel, "Missing sentinel service name.")), } } /// Read the `(host, port)` tuple for the primary Redis server, as identified by the `SENTINEL /// get-master-addr-by-name` command, then return a connection to that node. 
async fn discover_primary_node( - inner: &RefCount, - sentinel: &mut RedisTransport, -) -> Result { + inner: &RefCount, + sentinel: &mut ExclusiveConnection, +) -> Result { let service_name = read_service_name(inner)?; - let command = RedisCommand::new(RedisCommandKind::Sentinel, vec![ + let command = Command::new(CommandKind::Sentinel, vec![ static_val!(GET_MASTER_ADDR_BY_NAME), service_name.into(), ]); @@ -225,8 +209,8 @@ async fn discover_primary_node( .await?; let response = stry!(protocol_utils::frame_to_results(frame)); let server = if response.is_null() { - return Err(RedisError::new( - RedisErrorKind::Sentinel, + return Err(Error::new( + ErrorKind::Sentinel, "Missing primary address in response from sentinel node.", )); } else { @@ -250,49 +234,52 @@ async fn discover_primary_node( /// Verify that the Redis server is a primary node and not a replica. async fn check_primary_node_role( - inner: &RefCount, - transport: &mut RedisTransport, -) -> Result<(), RedisError> { - let command = RedisCommand::new(RedisCommandKind::Role, Vec::new()); + inner: &RefCount, + transport: &mut ExclusiveConnection, +) -> Result<(), Error> { + let command = Command::new(CommandKind::Role, Vec::new()); _debug!(inner, "Checking role for redis server at {}", transport.server); let frame = stry!(transport.request_response(command, inner.is_resp3()).await); let response = stry!(protocol_utils::frame_to_results(frame)); - if let RedisValue::Array(values) = response { + if let Value::Array(values) = response { if let Some(first) = values.first() { let is_master = first.as_str().map(|s| s == "master").unwrap_or(false); if is_master { Ok(()) } else { - Err(RedisError::new( - RedisErrorKind::Sentinel, + Err(Error::new( + ErrorKind::Sentinel, format!("Invalid role: {:?}", first.as_str()), )) } } else { - Err(RedisError::new(RedisErrorKind::Sentinel, "Invalid role response.")) + Err(Error::new(ErrorKind::Sentinel, "Invalid role response.")) } } else { - Err(RedisError::new( - 
RedisErrorKind::Sentinel, - "Could not read redis server role.", - )) + Err(Error::new(ErrorKind::Sentinel, "Could not read redis server role.")) } } /// Update the cached backchannel state with the new connection information, disconnecting the old connection if /// needed. async fn update_sentinel_backchannel( - inner: &RefCount, - transport: &RedisTransport, -) -> Result<(), RedisError> { - let mut backchannel = inner.backchannel.write().await; - backchannel.check_and_disconnect(inner, Some(&transport.server)).await; - backchannel.connection_ids.clear(); + inner: &RefCount, + transport: &ExclusiveConnection, +) -> Result<(), Error> { + inner + .backchannel + .check_and_disconnect(inner, Some(&transport.server)) + .await; + inner.backchannel.connection_ids.lock().clear(); if let Some(id) = transport.id { - backchannel.connection_ids.insert(transport.server.clone(), id); + inner + .backchannel + .connection_ids + .lock() + .insert(transport.server.clone(), id); } Ok(()) @@ -307,11 +294,11 @@ async fn update_sentinel_backchannel( /// * Update the cached backchannel information. /// * Split and store the primary node transport on `writer`. 
async fn update_cached_client_state( - inner: &RefCount, - writer: &mut Option, - mut sentinel: RedisTransport, - transport: RedisTransport, -) -> Result<(), RedisError> { + inner: &RefCount, + writer: &mut Option, + mut sentinel: ExclusiveConnection, + transport: ExclusiveConnection, +) -> Result<(), Error> { let sentinels = read_sentinels(inner, &mut sentinel).await?; inner .server_state @@ -320,8 +307,7 @@ async fn update_cached_client_state( .update_sentinel_nodes(&transport.server, sentinels); let _ = update_sentinel_backchannel(inner, &transport).await; - let (_, _writer) = connection::split(inner, transport, false, centralized::spawn_reader_task)?; - *writer = Some(_writer); + *writer = Some(transport.into_pipelined(false)); Ok(()) } @@ -330,16 +316,16 @@ async fn update_cached_client_state( /// /// pub async fn initialize_connection( - inner: &RefCount, + inner: &RefCount, connections: &mut Connections, - buffer: &mut VecDeque, -) -> Result<(), RedisError> { + buffer: &mut VecDeque, +) -> Result<(), Error> { _debug!(inner, "Initializing sentinel connection."); let commands = connections.disconnect_all(inner).await; buffer.extend(commands); match connections { - Connections::Sentinel { writer } => { + Connections::Sentinel { connection: writer } => { let mut sentinel = connect_to_sentinel(inner).await?; let mut transport = discover_primary_node(inner, &mut sentinel).await?; let server = transport.server.clone(); @@ -348,7 +334,7 @@ pub async fn initialize_connection( Box::pin(async { check_primary_node_role(inner, &mut transport).await?; update_cached_client_state(inner, writer, sentinel, transport).await?; - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }), inner.internal_command_timeout(), ) @@ -357,9 +343,6 @@ pub async fn initialize_connection( inner.notifications.broadcast_reconnect(server); Ok(()) }, - _ => Err(RedisError::new( - RedisErrorKind::Config, - "Expected sentinel connections.", - )), + _ => Err(Error::new(ErrorKind::Config, "Expected sentinel 
connections.")), } } diff --git a/src/router/transactions.rs b/src/router/transactions.rs index 53fbe942..599db47e 100644 --- a/src/router/transactions.rs +++ b/src/router/transactions.rs @@ -1,519 +1,260 @@ use crate::{ - error::{RedisError, RedisErrorKind}, - interfaces::Resp3Frame, - modules::inner::RedisClientInner, + error::{Error, ErrorKind}, + modules::inner::ClientInner, protocol::{ - command::{ClusterErrorKind, RedisCommand, RedisCommandKind, ResponseSender, RouterReceiver, RouterResponse}, - responders::ResponseKind, - utils::pretty_error, + command::{Command, CommandKind, ResponseSender}, + connection, + connection::Connection, + responders, + utils as protocol_utils, }, - router::{clustered::parse_cluster_error_frame, utils, Router, Written}, - runtime::{oneshot_channel, AtomicUsize, Mutex, RefCount}, - types::{ClusterHash, Server}, - utils as client_utils, + router::{utils, Router}, + runtime::RefCount, + types::{config::Server, ClusterHash, Resp3Frame}, }; -use redis_protocol::resp3::types::{FrameKind, Resp3Frame as _Resp3Frame}; -use std::iter::repeat; -/// An internal enum describing the result of an attempt to send a transaction command. -#[derive(Debug)] -enum TransactionResponse { - /// Retry the entire transaction again after reconnecting or resetting connections. - /// - /// Returned in response to a write error or a connection closing. - Retry(RedisError), - /// Send `DISCARD` and retry the entire transaction against the provided server/hash slot. - Redirection((ClusterErrorKind, u16, Server)), - /// Finish the transaction with the associated response. - Finished(Resp3Frame), - /// Continue the transaction. - /// - /// Note: if `abort_on_error` is true the transaction will continue even after errors from the server. If `false` - /// the error will instead be returned as a `Result::Err` that ends the transaction. - Continue, -} - -/// Write a command in the context of a transaction and process the router response. 
-/// -/// Returns the command result policy or a fatal error that should end the transaction. -async fn write_command( - inner: &RefCount, - router: &mut Router, - server: &Server, - command: RedisCommand, - abort_on_error: bool, - rx: Option, -) -> Result { - _trace!( - inner, - "Sending trx command {} ({}) to {}", - command.kind.to_str_debug(), - command.debug_id(), - server - ); - - let timeout_dur = command.timeout_dur.unwrap_or_else(|| inner.default_command_timeout()); - let result = match router.write_direct(command, server).await { - Written::Error((error, _)) => Err(error), - Written::Disconnected((_, _, error)) => Err(error), - Written::NotFound(_) => Err(RedisError::new(RedisErrorKind::Cluster, "Connection not found.")), - _ => Ok(()), - }; - if let Err(e) = result { - // TODO check fail fast and exit early w/o retry here? - _debug!(inner, "Error writing trx command: {:?}", e); - return Ok(TransactionResponse::Retry(e)); - } +/// Send DISCARD to the provided server. +async fn discard(inner: &RefCount, conn: &mut Connection) -> Result<(), Error> { + let command = Command::new(CommandKind::Discard, vec![]); + let frame = connection::request_response(inner, conn, command, Some(inner.internal_command_timeout())).await?; + let result = protocol_utils::frame_to_results(frame)?; - if let Some(rx) = rx { - match client_utils::timeout(rx, timeout_dur).await? 
{ - RouterResponse::Continue => Ok(TransactionResponse::Continue), - RouterResponse::Ask((slot, server, _)) => { - Ok(TransactionResponse::Redirection((ClusterErrorKind::Ask, slot, server))) - }, - RouterResponse::Moved((slot, server, _)) => Ok(TransactionResponse::Redirection(( - ClusterErrorKind::Moved, - slot, - server, - ))), - RouterResponse::ConnectionClosed((err, _)) => Ok(TransactionResponse::Retry(err)), - RouterResponse::TransactionError((err, _)) => { - if abort_on_error { - Err(err) - } else { - Ok(TransactionResponse::Continue) - } - }, - RouterResponse::TransactionResult(frame) => Ok(TransactionResponse::Finished(frame)), - } + if result.is_ok() { + Ok(()) } else { - Ok(TransactionResponse::Continue) + Err(Error::new(ErrorKind::Unknown, "Unexpected DISCARD response.")) } } /// Send EXEC to the provided server. -async fn send_non_pipelined_exec( - inner: &RefCount, - router: &mut Router, - server: &Server, - id: u64, -) -> Result { - let mut command = RedisCommand::new(RedisCommandKind::Exec, vec![]); - command.can_pipeline = false; - command.skip_backpressure = true; - command.transaction_id = Some(id); - let rx = command.create_router_channel(); - - write_command(inner, router, server, command, true, Some(rx)).await -} - -/// Send DISCARD to the provided server. 
-async fn send_non_pipelined_discard( - inner: &RefCount, - router: &mut Router, - server: &Server, - id: u64, -) -> Result { - let mut command = RedisCommand::new(RedisCommandKind::Discard, vec![]); - command.can_pipeline = false; - command.skip_backpressure = true; - command.transaction_id = Some(id); - let rx = command.create_router_channel(); +async fn exec( + inner: &RefCount, + conn: &mut Connection, + expected: usize, +) -> Result, Error> { + let mut command = Command::new(CommandKind::Exec, vec![]); + let (frame, _) = utils::prepare_command(inner, &conn.counters, &mut command)?; + conn.write(frame, true, false).await?; + conn.flush().await?; + let mut responses = Vec::with_capacity(expected + 1); + + for _ in 0 .. (expected + 1) { + let frame = match conn.read_skip_pubsub(inner).await? { + Some(frame) => frame, + None => return Err(Error::new(ErrorKind::Protocol, "Unexpected empty frame received.")), + }; - write_command(inner, router, server, command, true, Some(rx)).await + responses.push(frame); + } + responders::sample_command_latencies(inner, &mut command); + Ok(responses) } -fn update_hash_slot(commands: &mut [RedisCommand], slot: u16) { +fn update_hash_slot(commands: &mut [Command], slot: u16) { for command in commands.iter_mut() { command.hasher = ClusterHash::Custom(slot); } } -/// Find the server that should receive the transaction, creating connections if needed. -async fn find_or_create_connection( - inner: &RefCount, - router: &mut Router, - command: &RedisCommand, -) -> Result, RedisError> { - if let Some(server) = command.cluster_node.as_ref() { - Ok(Some(server.clone())) - } else { - match router.find_connection(command) { - Some(server) => Ok(Some(server.clone())), - None => { - if inner.config.server.is_clustered() { - // optimistically sync the cluster, then fall back to a full reconnect - if router.sync_cluster().await.is_err() { - utils::delay_cluster_sync(inner).await?; - utils::reconnect_with_policy(inner, router).await? 
- } - } else { - utils::reconnect_with_policy(inner, router).await? - }; - - Ok(None) - }, - } - } +fn max_attempts_error(tx: ResponseSender, error: Option) { + let _ = tx.send(Err( + error.unwrap_or_else(|| Error::new(ErrorKind::Unknown, "Max attempts exceeded")), + )); } -fn build_pipeline( - commands: &[RedisCommand], - response: ResponseKind, - id: u64, -) -> Result, RedisError> { - let mut pipeline = Vec::with_capacity(commands.len() + 1); - let mut exec = RedisCommand::new(RedisCommandKind::Exec, vec![]); - exec.can_pipeline = true; - exec.skip_backpressure = true; - exec.fail_fast = true; - exec.transaction_id = Some(id); - exec.response = response - .duplicate() - .ok_or_else(|| RedisError::new(RedisErrorKind::Unknown, "Invalid pipelined transaction response."))?; - exec.response.set_expected_index(commands.len()); - - for (idx, command) in commands.iter().enumerate() { - let mut response = response - .duplicate() - .ok_or_else(|| RedisError::new(RedisErrorKind::Unknown, "Invalid pipelined transaction response."))?; - response.set_expected_index(idx); - let mut command = command.duplicate(response); - command.fail_fast = true; - command.skip_backpressure = true; - command.can_pipeline = true; - - pipeline.push(command); - } - pipeline.push(exec); - Ok(pipeline) +fn max_redirections_error(tx: ResponseSender) { + let _ = tx.send(Err(Error::new(ErrorKind::Unknown, "Max redirections exceeded"))); } -pub mod exec { - use super::*; - // TODO find a better way to combine these functions - - /// Run the transaction, following cluster redirects and reconnecting as needed. 
- #[allow(unused_mut)] - pub async fn non_pipelined( - inner: &RefCount, - router: &mut Router, - mut commands: Vec, - id: u64, - abort_on_error: bool, - mut tx: ResponseSender, - ) -> Result<(), RedisError> { - if commands.is_empty() { - let _ = tx.send(Ok(Resp3Frame::Null)); - return Ok(()); - } - // each of the commands should have the same options - let max_attempts = if commands[0].attempts_remaining == 0 { - inner.max_command_attempts() - } else { - commands[0].attempts_remaining - }; - let max_redirections = if commands[0].redirections_remaining == 0 { - inner.connection.max_redirections - } else { - commands[0].redirections_remaining - }; - - let mut attempted = 0; - let mut redirections = 0; - 'outer: loop { - _debug!(inner, "Starting transaction {} (attempted: {})", id, attempted); - let server = match find_or_create_connection(inner, router, &commands[0]).await? { - Some(server) => server, - None => continue, - }; +fn is_execabort(error: &Error) -> bool { + error.details().starts_with("EXECABORT") +} - let mut idx = 0; - if attempted > 0 { - inner.counters.incr_redelivery_count(); +fn process_responses(responses: Vec, abort_on_error: bool) -> Result { + // check for errors in intermediate frames then return the last frame + let num_responses = responses.len(); + for (idx, frame) in responses.into_iter().enumerate() { + if let Some(error) = protocol_utils::frame_to_error(&frame) { + let should_return_error = error.is_moved() + || error.is_ask() + || is_execabort(&error) + // return intermediate errors if `abort_on_error` + || (idx < num_responses - 1 && abort_on_error) + // always return errors from the last frame + || idx == num_responses - 1; + + if should_return_error { + return Err(error); + } else { + continue; } - // send each of the commands. 
the first one is always MULTI - 'inner: while idx < commands.len() { - let command = commands[idx].duplicate(ResponseKind::Skip); - let rx = command.create_router_channel(); - - // wait on each response before sending the next command in order to handle errors or follow cluster - // redirections as quickly as possible. - match write_command(inner, router, &server, command, abort_on_error, Some(rx)).await { - Ok(TransactionResponse::Continue) => { - idx += 1; - continue 'inner; - }, - Ok(TransactionResponse::Retry(error)) => { - _debug!(inner, "Retrying trx {} after error: {:?}", id, error); - if let Err(e) = send_non_pipelined_discard(inner, router, &server, id).await { - _warn!(inner, "Error sending DISCARD in trx {}: {:?}", id, e); - } - - attempted += 1; - if attempted >= max_attempts { - let _ = tx.send(Err(error)); - return Ok(()); - } else { - utils::reconnect_with_policy(inner, router).await?; - } - - continue 'outer; - }, - Ok(TransactionResponse::Redirection((kind, slot, server))) => { - redirections += 1; - if redirections > max_redirections { - let _ = tx.send(Err(RedisError::new( - RedisErrorKind::Cluster, - "Too many cluster redirections.", - ))); - return Ok(()); - } + } else if idx == num_responses - 1 { + return Ok(frame); + } + } - update_hash_slot(&mut commands, slot); - if let Err(e) = send_non_pipelined_discard(inner, router, &server, id).await { - _warn!(inner, "Error sending DISCARD in trx {}: {:?}", id, e); - } - utils::cluster_redirect_with_policy(inner, router, kind, slot, &server).await?; + Err(Error::new(ErrorKind::Protocol, "Missing transaction response.")) +} - continue 'outer; - }, - Ok(TransactionResponse::Finished(frame)) => { - let _ = tx.send(Ok(frame)); - return Ok(()); - }, - Err(error) => { - // fatal errors that end the transaction - let _ = send_non_pipelined_discard(inner, router, &server, id).await; - let _ = tx.send(Err(error)); - return Ok(()); - }, - } - } +/// Send the transaction to the server. 
+pub async fn send( + inner: &RefCount, + router: &mut Router, + mut commands: Vec, + id: u64, + abort_on_error: bool, + tx: ResponseSender, +) -> Result<(), Error> { + if commands.is_empty() { + let _ = tx.send(Err(Error::new(ErrorKind::InvalidCommand, "Empty transaction."))); + return Ok(()); + } - match send_non_pipelined_exec(inner, router, &server, id).await { - Ok(TransactionResponse::Finished(frame)) => { - let _ = tx.send(Ok(frame)); + _debug!(inner, "Starting transaction {}", id); + let max_attempts = commands.last().unwrap().attempts_remaining; + let max_redirections = commands.last().unwrap().redirections_remaining; + let mut attempted = 0; + let mut redirections = 0; + let mut asking: Option<(Server, u16)> = None; + + 'outer: loop { + macro_rules! retry { + ($err:expr) => {{ + attempted += 1; + if attempted > max_attempts { + max_attempts_error(tx, $err); return Ok(()); - }, - Ok(TransactionResponse::Retry(error)) => { - _debug!(inner, "Retrying trx {} after error: {:?}", id, error); - if let Err(e) = send_non_pipelined_discard(inner, router, &server, id).await { - _warn!(inner, "Error sending DISCARD in trx {}: {:?}", id, e); - } - - attempted += 1; - if attempted >= max_attempts { - let _ = tx.send(Err(error)); - return Ok(()); - } else { - utils::reconnect_with_policy(inner, router).await?; - } - + } else { + utils::reconnect_with_policy(inner, router).await?; continue 'outer; - }, - Ok(TransactionResponse::Redirection((kind, slot, dest))) => { - // doesn't make sense on EXEC, but return it as an error so it isn't lost - let _ = send_non_pipelined_discard(inner, router, &server, id).await; - let _ = tx.send(Err(RedisError::new( - RedisErrorKind::Cluster, - format!("{} {} {}", kind, slot, dest), - ))); - return Ok(()); - }, - Ok(TransactionResponse::Continue) => { - _warn!(inner, "Invalid final response to transaction {}", id); - let _ = send_non_pipelined_discard(inner, router, &server, id).await; - let _ = tx.send(Err(RedisError::new_canceled())); - 
return Ok(()); - }, - Err(error) => { - let _ = send_non_pipelined_discard(inner, router, &server, id).await; - let _ = tx.send(Err(error)); - return Ok(()); - }, - }; + } + }}; + } + macro_rules! discard_retry { + ($conn:expr, $err:expr) => {{ + let _ = $conn.skip_results(inner).await; + let _ = discard(inner, $conn).await; + retry!($err); + }}; } - } - #[allow(unused_mut)] - pub async fn pipelined( - inner: &RefCount, - router: &mut Router, - mut commands: Vec, - id: u64, - mut tx: ResponseSender, - ) -> Result<(), RedisError> { - if commands.is_empty() { - let _ = tx.send(Ok(Resp3Frame::Null)); - return Ok(()); + if let Err(err) = router.drain_all(inner).await { + _debug!(inner, "Error draining router before transaction: {:?}", err); + retry!(None); } - // each of the commands should have the same options - let max_attempts = if commands[0].attempts_remaining == 0 { - inner.max_command_attempts() - } else { - commands[0].attempts_remaining + // find the server that should receive the transaction + let conn = match asking.as_ref() { + Some((server, _)) => match router.get_connection_mut(server) { + Some(conn) => conn, + None => retry!(None), + }, + None => match router.route(commands.last().unwrap()) { + Some(server) => server, + None => retry!(None), + }, }; - let max_redirections = if commands[0].redirections_remaining == 0 { - inner.connection.max_redirections + + let expected = if asking.is_some() { + commands.len() + 1 } else { - commands[0].redirections_remaining + commands.len() }; - - let mut attempted = 0; - let mut redirections = 0; - 'outer: loop { - _debug!(inner, "Starting transaction {} (attempted: {})", id, attempted); - let server = match find_or_create_connection(inner, router, &commands[0]).await? 
{ - Some(server) => server, - None => continue, - }; - - if attempted > 0 { - inner.counters.incr_redelivery_count(); - } - let (exec_tx, exec_rx) = oneshot_channel(); - let buf: Vec<_> = repeat(Resp3Frame::Null).take(commands.len() + 1).collect(); - // pipelined transactions buffer their results until a response to EXEC is received - let response = ResponseKind::Buffer { - error_early: false, - expected: commands.len() + 1, - received: RefCount::new(AtomicUsize::new(0)), - tx: RefCount::new(Mutex::new(Some(exec_tx))), - frames: RefCount::new(Mutex::new(buf)), - index: 0, + // sending ASKING first if needed + if let Some((_, slot)) = asking.as_ref() { + let mut command = Command::new_asking(*slot); + let (frame, _) = match utils::prepare_command(inner, &conn.counters, &mut command) { + Ok(frame) => frame, + Err(err) => { + let _ = tx.send(Err(err)); + return Ok(()); + }, }; - // write each command in the pipeline - let pipeline = build_pipeline(&commands, response, id)?; - for command in pipeline.into_iter() { - match write_command(inner, router, &server, command, false, None).await? 
{ - TransactionResponse::Continue => continue, - TransactionResponse::Retry(error) => { - _debug!(inner, "Retrying pipelined trx {} after error: {:?}", id, error); - if let Err(e) = send_non_pipelined_discard(inner, router, &server, id).await { - _warn!(inner, "Error sending pipelined discard: {:?}", e); - } - - attempted += 1; - if attempted >= max_attempts { - let _ = tx.send(Err(error)); - return Ok(()); - } else { - utils::reconnect_with_policy(inner, router).await?; - } - continue 'outer; - }, - _ => { - _error!(inner, "Unexpected pipelined write response."); - let _ = tx.send(Err(RedisError::new( - RedisErrorKind::Protocol, - "Unexpected pipeline write response.", - ))); - return Ok(()); - }, - } + if let Err(err) = conn.write(frame, true, false).await { + _debug!(inner, "Error sending trx command: {:?}", err); + retry!(Some(err)); } + } - // wait on the response and deconstruct the output frames - let mut response = match exec_rx.await.map_err(RedisError::from) { - Ok(Ok(frame)) => match frame { - Resp3Frame::Array { data, .. 
} => data, - _ => { - _error!(inner, "Unexpected pipelined exec response."); - let _ = tx.send(Err(RedisError::new( - RedisErrorKind::Protocol, - "Unexpected pipeline exec response.", - ))); - return Ok(()); - }, - }, - Ok(Err(err)) | Err(err) => { - _debug!( - inner, - "Reconnecting and retrying pipelined transaction after error: {:?}", - err - ); - attempted += 1; - if attempted >= max_attempts { - let _ = tx.send(Err(err)); - return Ok(()); - } else { - utils::reconnect_with_policy(inner, router).await?; - } - - continue 'outer; + // write all the commands before EXEC + for command in commands.iter_mut() { + let (frame, _) = match utils::prepare_command(inner, &conn.counters, command) { + Ok(frame) => frame, + Err(err) => { + let _ = tx.send(Err(err)); + return Ok(()); }, }; - if response.is_empty() { - let _ = tx.send(Err(RedisError::new( - RedisErrorKind::Protocol, - "Unexpected empty pipeline exec response.", - ))); - return Ok(()); + if let Err(err) = conn.write(frame, true, false).await { + _debug!(inner, "Error sending trx command: {:?}", err); + discard_retry!(conn, Some(err)); } + } - // check the last result for EXECABORT - let execabort = response - .last() - .and_then(|f| f.as_str()) - .map(|s| s.starts_with("EXECABORT")) - .unwrap_or(false); - - if execabort { - // find the first error, if it's a redirection then follow it and retry, otherwise return to the caller - let first_error = response.iter().enumerate().find_map(|(idx, frame)| { - if matches!(frame.kind(), FrameKind::SimpleError | FrameKind::BlobError) { - Some(idx) - } else { - None - } - }); + // send EXEC and process all the responses + match exec(inner, conn, expected).await { + Ok(responses) => match process_responses(responses, abort_on_error) { + Ok(result) => { + let _ = tx.send(Ok(result)); + return Ok(()); + }, + Err(err) => { + if err.is_moved() { + let slot = match protocol_utils::parse_cluster_error(err.details()) { + Ok((_, slot, _)) => slot, + Err(_) => { + let _ = 
discard(inner, conn).await; + let _ = tx.send(Err(Error::new(ErrorKind::Protocol, "Invalid cluster redirection."))); + return Ok(()); + }, + }; + update_hash_slot(&mut commands, slot); - if let Some(idx) = first_error { - let first_error_frame = response[idx].take(); - // check if error is a cluster redirection, otherwise return the error to the caller - if first_error_frame.is_redirection() { redirections += 1; if redirections > max_redirections { - let _ = tx.send(Err(RedisError::new( - RedisErrorKind::Cluster, - "Too many cluster redirections.", - ))); + max_redirections_error(tx); return Ok(()); + } else { + Box::pin(utils::reconnect_with_policy(inner, router)).await?; + continue; } + } else if err.is_ask() { + let (slot, server) = match protocol_utils::parse_cluster_error(err.details()) { + Ok((_, slot, server)) => match Server::from_str(&server) { + Some(server) => (slot, server), + None => { + let _ = discard(inner, conn).await; + let _ = tx.send(Err(Error::new(ErrorKind::Protocol, "Invalid ASK cluster redirection."))); + return Ok(()); + }, + }, + Err(_) => { + let _ = discard(inner, conn).await; + let _ = tx.send(Err(Error::new(ErrorKind::Protocol, "Invalid cluster redirection."))); + return Ok(()); + }, + }; - let (kind, slot, dest) = parse_cluster_error_frame(inner, &first_error_frame, &server)?; update_hash_slot(&mut commands, slot); - utils::cluster_redirect_with_policy(inner, router, kind, slot, &dest).await?; - continue 'outer; + redirections += 1; + if redirections > max_redirections { + max_redirections_error(tx); + return Ok(()); + } else { + asking = Some((server, slot)); + continue; + } } else { - // these errors are typically from the server, not from the connection layer - let error = first_error_frame.as_str().map(pretty_error).unwrap_or_else(|| { - RedisError::new( - RedisErrorKind::Protocol, - "Unexpected response to pipelined transaction.", - ) - }); - - let _ = tx.send(Err(error)); + let _ = discard(inner, conn).await; + let _ = 
tx.send(Err(err)); return Ok(()); } - } else { - // return the EXECABORT error to the caller if there's no other error - let error = response - .pop() - .and_then(|f| f.as_str().map(pretty_error)) - .unwrap_or_else(|| RedisError::new(RedisErrorKind::Protocol, "Invalid pipelined transaction response.")); - let _ = tx.send(Err(error)); - return Ok(()); - } - } else { - // return the last frame to the caller - let last = response.pop().unwrap_or(Resp3Frame::Null); - let _ = tx.send(Ok(last)); - return Ok(()); - } + }, + }, + Err(err) => { + _debug!(inner, "Error writing transaction: {:?}", err); + discard_retry!(conn, Some(err)) + }, } } } diff --git a/src/router/types.rs b/src/router/types.rs index 6a549292..483c8690 100644 --- a/src/router/types.rs +++ b/src/router/types.rs @@ -1,4 +1,18 @@ -use crate::protocol::types::Server; +use crate::{ + error::{Error, ErrorKind}, + modules::inner::ClientInner, + protocol::{connection::Connection, types::Server}, + runtime::RefCount, + types::Resp3Frame, +}; +use futures::stream::Stream; +use std::{ + collections::HashMap, + future::Future, + pin::Pin, + task::{Context, Poll}, + time::Instant, +}; /// Options describing how to change connections in a cluster. 
#[derive(Clone, Debug, Eq, PartialEq)] @@ -15,3 +29,145 @@ impl Default for ClusterChange { } } } + +fn poll_connection( + inner: &RefCount, + conn: &mut Connection, + cx: &mut Context<'_>, + buf: &mut Vec<(Server, Option>)>, + now: &Instant, +) { + match Pin::new(&mut conn.transport).poll_next(cx) { + Poll::Ready(Some(frame)) => { + conn.last_write = None; + buf.push((conn.server.clone(), Some(frame.map(|f| f.into_resp3())))); + }, + Poll::Ready(None) => { + conn.last_write = None; + buf.push((conn.server.clone(), None)); + }, + Poll::Pending => { + if let Some(duration) = inner.connection.unresponsive.max_timeout { + if let Some(last_write) = conn.last_write { + if now.saturating_duration_since(last_write) > duration { + buf.push(( + conn.server.clone(), + Some(Err(Error::new(ErrorKind::IO, "Unresponsive connection."))), + )); + } + } + } + }, + }; +} + +/// A future that reads from all connections and performs unresponsive checks. +// `poll_next` on a Framed is not cancel-safe +pub struct ReadAllFuture<'a, 'b> { + inner: &'a RefCount, + connections: &'b mut HashMap, + #[cfg(feature = "replicas")] + replicas: &'b mut HashMap, +} + +impl<'a, 'b> ReadAllFuture<'a, 'b> { + #[cfg(not(feature = "replicas"))] + pub fn new(inner: &'a RefCount, connections: &'b mut HashMap) -> Self { + Self { connections, inner } + } + + #[cfg(feature = "replicas")] + pub fn new( + inner: &'a RefCount, + connections: &'b mut HashMap, + replicas: &'b mut HashMap, + ) -> Self { + Self { + connections, + inner, + replicas, + } + } +} + +impl Future for ReadAllFuture<'_, '_> { + type Output = Vec<(Server, Option>)>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + #[cfg(feature = "replicas")] + if self.connections.is_empty() && self.replicas.is_empty() { + return Poll::Ready(Vec::new()); + } + #[cfg(not(feature = "replicas"))] + if self.connections.is_empty() { + return Poll::Ready(Vec::new()); + } + + let _self = self.get_mut(); + let now = Instant::now(); + let mut out = 
Vec::new(); + for (_, conn) in _self.connections.iter_mut() { + poll_connection(_self.inner, conn, cx, &mut out, &now); + } + #[cfg(feature = "replicas")] + for (_, conn) in _self.replicas.iter_mut() { + poll_connection(_self.inner, conn, cx, &mut out, &now); + } + + if out.is_empty() { + Poll::Pending + } else { + Poll::Ready(out) + } + } +} + +/// A future that reads from the connection and performs unresponsive checks. +pub struct ReadFuture<'a, 'b> { + inner: &'a RefCount, + connection: &'b mut Connection, + #[cfg(feature = "replicas")] + replicas: &'b mut HashMap, +} + +impl<'a, 'b> ReadFuture<'a, 'b> { + #[cfg(not(feature = "replicas"))] + pub fn new(inner: &'a RefCount, connection: &'b mut Connection) -> Self { + Self { connection, inner } + } + + #[cfg(feature = "replicas")] + pub fn new( + inner: &'a RefCount, + connection: &'b mut Connection, + replicas: &'b mut HashMap, + ) -> Self { + Self { + inner, + connection, + replicas, + } + } +} + +impl Future for ReadFuture<'_, '_> { + type Output = Vec<(Server, Option>)>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut out = Vec::new(); + let now = Instant::now(); + let _self = self.get_mut(); + + poll_connection(_self.inner, _self.connection, cx, &mut out, &now); + #[cfg(feature = "replicas")] + for (_, conn) in _self.replicas.iter_mut() { + poll_connection(_self.inner, conn, cx, &mut out, &now); + } + + if out.is_empty() { + Poll::Pending + } else { + Poll::Ready(out) + } + } +} diff --git a/src/router/utils.rs b/src/router/utils.rs index a1831360..d86475e9 100644 --- a/src/router/utils.rs +++ b/src/router/utils.rs @@ -1,85 +1,48 @@ use crate::{ - error::{RedisError, RedisErrorKind}, + error::{Error, ErrorKind}, interfaces, - modules::inner::RedisClientInner, + modules::inner::ClientInner, protocol::{ - command::{RedisCommand, RouterCommand, RouterResponse}, - connection::{RedisWriter, SharedBuffer, SplitStreamKind}, - responders::ResponseKind, + command::{Command, RouterCommand}, 
+ connection::Connection, types::*, utils as protocol_utils, }, - router::{utils, Backpressure, Counters, Router, Written}, - runtime::{oneshot_channel, sleep, RefCount}, + router::{centralized, clustered, responses, Counters, ReconnectServer, Router}, + runtime::RefCount, types::*, utils as client_utils, }; -use futures::TryStreamExt; +use bytes::Bytes; use std::{ - cmp, + collections::VecDeque, time::{Duration, Instant}, }; +use tokio::pin; -#[cfg(feature = "transactions")] -use crate::protocol::command::ClusterErrorKind; - -/// Check the connection state and command flags to determine the backpressure policy to apply, if any. -pub fn check_backpressure( - inner: &RefCount, - counters: &Counters, - command: &RedisCommand, -) -> Result, RedisError> { - if command.skip_backpressure { - return Ok(None); - } - let in_flight = client_utils::read_atomic(&counters.in_flight); - - inner.with_perf_config(|perf_config| { - // TODO clean this up and write better docs - if in_flight as u64 > perf_config.backpressure.max_in_flight_commands { - if perf_config.backpressure.disable_auto_backpressure { - Err(RedisError::new_backpressure()) - } else { - match perf_config.backpressure.policy { - BackpressurePolicy::Drain => Ok(Some(Backpressure::Block)), - BackpressurePolicy::Sleep { - disable_backpressure_scaling, - min_sleep_duration, - } => { - let duration = if disable_backpressure_scaling { - min_sleep_duration - } else { - Duration::from_millis(cmp::max(min_sleep_duration.as_millis() as u64, in_flight as u64)) - }; - - Ok(Some(Backpressure::Wait(duration))) - }, - } - } - } else { - Ok(None) - } - }) -} +static OK_FRAME: Resp3Frame = Resp3Frame::SimpleString { + data: Bytes::from_static(b"OK"), + attributes: None, +}; #[cfg(feature = "partial-tracing")] -fn set_command_trace(inner: &RefCount, command: &mut RedisCommand) { +fn set_command_trace(inner: &RefCount, command: &mut Command) { if inner.should_trace() { crate::trace::set_network_span(inner, command, true); } } 
#[cfg(not(feature = "partial-tracing"))] -fn set_command_trace(_inner: &RefCount, _: &mut RedisCommand) {} +fn set_command_trace(_inner: &RefCount, _: &mut Command) {} /// Prepare the command, updating flags in place. /// /// Returns the RESP frame and whether the socket should be flushed. pub fn prepare_command( - inner: &RefCount, + inner: &RefCount, counters: &Counters, - command: &mut RedisCommand, -) -> Result<(ProtocolFrame, bool), RedisError> { + command: &mut Command, +) -> Result<(ProtocolFrame, bool), Error> { let frame = protocol_utils::encode_frame(inner, command)?; // flush the socket under any of the following conditions: @@ -90,61 +53,36 @@ pub fn prepare_command( // * the command does some form of authentication // * the command goes to multiple sockets at once // * the command blocks the router command loop - let should_flush = counters.should_send(inner) - || command.kind.should_flush() - || command.is_all_cluster_nodes() - || command.has_router_channel(); - + let should_flush = counters.should_send(inner) || command.kind.should_flush() || command.is_all_cluster_nodes(); command.network_start = Some(Instant::now()); set_command_trace(inner, command); Ok((frame, should_flush)) } -/// Write a command on the provided writer half of a socket. +/// Write a command to the connection, returning whether the socket was flushed. +#[inline(always)] pub async fn write_command( - inner: &RefCount, - writer: &mut RedisWriter, - mut command: RedisCommand, + inner: &RefCount, + conn: &mut Connection, + mut command: Command, force_flush: bool, -) -> Written { - _trace!( - inner, - "Writing {} ({}). 
Timed out: {}, Force flush: {}", - command.kind.to_str_debug(), - command.debug_id(), - client_utils::read_bool_atomic(&command.timed_out), - force_flush - ); +) -> Result)> { if client_utils::read_bool_atomic(&command.timed_out) { _debug!( inner, "Ignore writing timed out command: {}", command.kind.to_str_debug() ); - return Written::Ignore; + return Ok(false); } - - match check_backpressure(inner, &writer.counters, &command) { - Ok(Some(backpressure)) => { - _trace!(inner, "Returning backpressure for {}", command.kind.to_str_debug()); - return Written::Backpressure((command, backpressure)); - }, - Err(e) => { - // return manual backpressure errors directly to the caller - command.finish(inner, Err(e)); - return Written::Ignore; - }, - _ => {}, - }; - - let (frame, should_flush) = match prepare_command(inner, &writer.counters, &mut command) { + let (frame, should_flush) = match prepare_command(inner, &conn.counters, &mut command) { Ok((frame, should_flush)) => (frame, should_flush || force_flush), Err(e) => { _warn!(inner, "Frame encoding error for {}", command.kind.to_str_debug()); // do not retry commands that trigger frame encoding errors - command.finish(inner, Err(e)); - return Written::Ignore; + command.respond_to_caller(Err(e)); + return Ok(false); }, }; @@ -153,142 +91,305 @@ pub async fn write_command( "Sending command {} ({}) to {}", command.kind.to_str_debug(), command.debug_id(), - writer.server + conn.server ); command.write_attempts += 1; - if !writer.is_working() { - let error = RedisError::new(RedisErrorKind::IO, "Connection closed."); - _debug!(inner, "Error sending command: {:?}", error); - return Written::Disconnected((Some(writer.server.clone()), Some(command), error)); - } + let check_unresponsive = !command.kind.is_pubsub() && inner.has_unresponsive_duration(); + let respond_early = if command.kind.closes_connection() { + command.take_responder() + } else { + None + }; - let no_incr = command.has_no_responses(); - writer.push_command(inner, 
command); - if let Err(err) = writer.write_frame(frame, should_flush, no_incr).await { - Written::Disconnected((Some(writer.server.clone()), None, err)) + conn.push_command(command); + let write_result = conn.write(frame, should_flush, check_unresponsive).await; + if let Err(err) = write_result { + debug!("{}: Error sending frame to socket: {:?}", conn.server, err); + Err((err, None)) } else { - Written::Sent((writer.server.clone(), should_flush)) + if let Some(tx) = respond_early { + let _ = tx.send(Ok(OK_FRAME.clone())); + } + Ok(should_flush) } } -/// Check the shared connection command buffer to see if the oldest command blocks the router task on a -/// response (not pipelined). -pub fn check_blocked_router(inner: &RefCount, buffer: &SharedBuffer, error: &Option) { - let command = match buffer.pop() { - Some(cmd) => cmd, - None => return, - }; - if command.has_router_channel() { - #[allow(unused_mut)] - let mut tx = match command.take_router_tx() { - Some(tx) => tx, - None => return, - }; - let error = error - .clone() - .unwrap_or(RedisError::new(RedisErrorKind::IO, "Connection Closed")); +pub fn defer_reconnection( + inner: &RefCount, + router: &mut Router, + server: Option<&Server>, + error: Error, + _replica: bool, +) -> Result<(), Error> { + if !inner.should_reconnect() || error.should_not_reconnect() { + return Err(error); + } - if let Err(_) = tx.send(RouterResponse::ConnectionClosed((error, command))) { - _warn!(inner, "Failed to send router connection closed error."); - } + if router.has_pending_reconnection(&server) { + _debug!(inner, "Skip defer reconnection."); + Ok(()) } else { - // this is safe to rearrange since the connection has closed and we can't guarantee command ordering when - // connections close while an entire pipeline is in flight - buffer.push(command); - } -} + _debug!(inner, "Defer reconnection to {:?} after {:?}", server, error); + // keep track of pending reconnection commands to dedup them before they're sent + if let 
Some(server) = server { + router.pending_reconnection.insert(ReconnectServer::One(server.clone())); + } else { + router.pending_reconnection.insert(ReconnectServer::All); + }; -pub async fn remove_cached_connection_id(inner: &RefCount, server: &Server) { - inner.backchannel.write().await.remove_connection_id(server); + interfaces::send_to_router(inner, RouterCommand::Reconnect { + server: server.cloned(), + force: false, + tx: None, + #[cfg(feature = "replicas")] + replica: _replica, + }) + } } /// Filter the shared buffer, removing commands that reached the max number of attempts and responding to each caller /// with the underlying error. pub fn check_final_write_attempt( - inner: &RefCount, - buffer: &SharedBuffer, - error: &Option, -) { + inner: &RefCount, + buffer: VecDeque, + error: Option<&Error>, +) -> VecDeque { buffer - .drain() .into_iter() - .filter_map(|command| { + .filter_map(|mut command| { if command.should_finish_with_error(inner) { - command.finish( - inner, - Err( - error - .clone() - .unwrap_or(RedisError::new(RedisErrorKind::IO, "Connection Closed")), - ), - ); + command.respond_to_caller(Err( + error.cloned().unwrap_or(Error::new(ErrorKind::IO, "Connection Closed")), + )); None } else { Some(command) } }) - .for_each(|command| { - buffer.push(command); - }); + .collect() } /// Read the next reconnection delay for the client. -pub fn next_reconnection_delay(inner: &RefCount) -> Result { +pub fn next_reconnection_delay(inner: &RefCount) -> Result { inner .policy .write() .as_mut() .and_then(|policy| policy.next_delay()) .map(Duration::from_millis) - .ok_or_else(|| RedisError::new(RedisErrorKind::Canceled, "Max reconnection attempts reached.")) + .ok_or_else(|| Error::new(ErrorKind::Canceled, "Max reconnection attempts reached.")) } /// Attempt to reconnect and replay queued commands. 
-pub async fn reconnect_once(inner: &RefCount, router: &mut Router) -> Result<(), RedisError> { - client_utils::set_client_state(&inner.state, ClientState::Connecting); - if let Err(e) = Box::pin(router.connect()).await { +pub async fn reconnect_once(inner: &RefCount, router: &mut Router) -> Result<(), Error> { + inner.set_client_state(ClientState::Connecting); + _trace!(inner, "Reconnecting..."); + if let Err(e) = Box::pin(router.connect(inner)).await { _debug!(inner, "Failed reconnecting with error: {:?}", e); - client_utils::set_client_state(&inner.state, ClientState::Disconnected); - inner.notifications.broadcast_error(e.clone()); + inner.set_client_state(ClientState::Disconnected); + inner.notifications.broadcast_error(e.clone(), None); Err(e) } else { - #[cfg(feature = "replicas")] - if let Err(e) = router.refresh_replica_routing().await { - _warn!(inner, "Error syncing replicas: {:?}", e); - if !inner.ignore_replica_reconnect_errors() { - client_utils::set_client_state(&inner.state, ClientState::Disconnected); - inner.notifications.broadcast_error(e.clone()); - return Err(e); - } - } // try to flush any previously in-flight commands - router.retry_buffer().await; + if let Err(err) = Box::pin(router.retry_buffer(inner)).await { + _warn!(inner, "Error flushing retry buffer: {:?}", err); + inner.set_client_state(ClientState::Disconnected); + inner.notifications.broadcast_error(err.clone(), None); + return Err(err); + } - client_utils::set_client_state(&inner.state, ClientState::Connected); + inner.set_client_state(ClientState::Connected); inner.notifications.broadcast_connect(Ok(())); inner.reset_reconnection_attempts(); Ok(()) } } +/// Disconnect, broadcast events to callers, and remove cached connection info. 
+pub async fn disconnect(inner: &RefCount, conn: &mut Connection, error: &Error) -> VecDeque { + let commands = conn.close().await; + let commands = check_final_write_attempt(inner, commands, Some(error)); + + #[cfg(feature = "replicas")] + if conn.replica { + responses::broadcast_replica_error(inner, &conn.server, Some(error.clone())); + } else { + responses::broadcast_reader_error(inner, &conn.server, Some(error.clone())); + } + #[cfg(not(feature = "replicas"))] + responses::broadcast_reader_error(inner, &conn.server, Some(error.clone())); + + inner.backchannel.remove_connection_id(&conn.server); + inner.backchannel.check_and_unblock(&conn.server); + commands +} + +/// Disconnect and buffer any commands to be retried later. +pub async fn drop_connection(inner: &RefCount, router: &mut Router, server: &Server, error: &Error) { + _debug!(inner, "Resetting connection to {} with error: {:?}", server, error); + if let Some(mut conn) = router.take_connection(server) { + let commands = disconnect(inner, &mut conn, error).await; + router.retry_commands(commands); + } +} + +/// Process the response frame from the provided server. +/// +/// Errors returned here should interrupt the routing task. +pub async fn process_response( + inner: &RefCount, + router: &mut Router, + server: &Server, + result: Option>, +) -> Result<(), Error> { + _trace!(inner, "Recv read result from {}", server); + + macro_rules! 
disconnect { + ($inner:expr, $router:expr, $server:expr, $err:expr) => {{ + let replica = $router.is_replica($server); + drop_connection($inner, $router, $server, &$err).await; + defer_reconnection($inner, $router, Some($server), $err, replica) + }}; + } + + match result { + Some(Ok(frame)) => { + let frame = match responses::preprocess_frame(inner, server, frame) { + Ok(frame) => frame, + Err(err) => { + _debug!(inner, "Error reading frame from server {}: {:?}", server, err); + return disconnect!(inner, router, server, err); + }, + }; + + if let Some(frame) = frame { + let conn = match router.get_connection_mut(server) { + Some(conn) => conn, + None => return Err(Error::new(ErrorKind::Unknown, "Missing expected connection.")), + }; + + if inner.config.server.is_clustered() { + clustered::process_response_frame(inner, conn, frame) + } else { + centralized::process_response_frame(inner, conn, frame) + } + } else { + Ok(()) + } + }, + Some(Err(err)) => { + _debug!(inner, "Error reading frame from server {}: {:?}", server, err); + disconnect!(inner, router, server, err) + }, + None => { + _debug!(inner, "Connection closed to {}", server); + let err = Error::new(ErrorKind::IO, "Connection closed."); + disconnect!(inner, router, server, err) + }, + } +} + +/// Read from sockets while waiting for the provided duration. +pub async fn read_and_sleep( + inner: &RefCount, + router: &mut Router, + duration: Duration, +) -> Result<(), Error> { + let sleep_ft = inner.wait_with_interrupt(duration); + pin!(sleep_ft); + + loop { + tokio::select! 
{ + result = &mut sleep_ft => return result, + results = router.select_read(inner) => { + for (server, result) in results.into_iter() { + if let Err(err) = process_response(inner, router, &server, result).await { + // defer reconnections until after waiting the full duration + let replica = router.is_replica(&server); + _debug!(inner, "Error reading from {} while sleeping: {:?}", server, err); + drop_connection(inner, router, &server, &err).await; + defer_reconnection(inner, router, Some(&server), err, replica)?; + } + } + } + } + } +} + +#[cfg(feature = "replicas")] +pub fn route_replica(router: &mut Router, command: &Command) -> Result<(Server, Server), Error> { + let primary = match router.cluster_owner(command) { + Some(server) => server.clone(), + None => { + return Err(Error::new( + ErrorKind::Cluster, + "Failed to find cluster hash slot owner.", + )); + }, + }; + + // there's a special case where the caller specifies a specific cluster node that should receive the command. in + // that case the caller can specify either the primary node owner or any of the replicas. this function needs to + // check both cases and return an error if the specified cluster node doesn't match either the primary node or any + // of the replica nodes. 
+ if let Some(node) = command.cluster_node.as_ref() { + if &primary == node { + // the caller specified the primary, so use any of the available replica nodes + let replica = match router.replicas.routing.next_replica(&primary) { + Some(replica) => replica.clone(), + None => { + return Err(Error::new( + ErrorKind::Cluster, + "Failed to find cluster hash slot owner.", + )); + }, + }; + + Ok((primary, replica)) + } else { + let replica = router + .replicas + .routing + .replicas(&primary) + .find(|replica| *replica == node) + .cloned(); + + if let Some(replica) = replica { + Ok((primary, replica)) + } else { + Err(Error::new(ErrorKind::Routing, "Failed to find replica node.")) + } + } + } else { + let replica = match router.replicas.routing.next_replica(&primary) { + Some(replica) => replica.clone(), + None => { + return Err(Error::new( + ErrorKind::Cluster, + "Failed to find cluster hash slot owner.", + )); + }, + }; + + Ok((primary, replica)) + } +} + /// Reconnect to the server(s) until the max reconnect policy attempts are reached. /// /// Errors from this function should end the connection task. -pub async fn reconnect_with_policy( - inner: &RefCount, - router: &mut Router, -) -> Result<(), RedisError> { - let mut delay = utils::next_reconnection_delay(inner)?; +pub async fn reconnect_with_policy(inner: &RefCount, router: &mut Router) -> Result<(), Error> { + let mut delay = next_reconnection_delay(inner)?; loop { if !delay.is_zero() { _debug!(inner, "Sleeping for {} ms.", delay.as_millis()); - inner.wait_with_interrupt(delay).await?; + read_and_sleep(inner, router, delay).await?; } - if let Err(e) = reconnect_once(inner, router).await { + if let Err(e) = Box::pin(reconnect_once(inner, router)).await { if e.should_not_reconnect() { return Err(e); } @@ -307,108 +408,83 @@ pub async fn reconnect_with_policy( Ok(()) } -/// Attempt to follow a cluster redirect, reconnecting as needed until the max reconnections attempts is reached. 
-#[cfg(feature = "transactions")] -pub async fn cluster_redirect_with_policy( - inner: &RefCount, +#[cfg(feature = "replicas")] +pub async fn add_replica_with_policy( + inner: &RefCount, router: &mut Router, - kind: ClusterErrorKind, - slot: u16, - server: &Server, -) -> Result<(), RedisError> { - let mut delay = inner.connection.cluster_cache_update_delay; - + primary: &Server, + replica: &Server, +) -> Result<(), Error> { loop { - if !delay.is_zero() { - _debug!(inner, "Sleeping for {} ms.", delay.as_millis()); - inner.wait_with_interrupt(delay).await?; - } - - if let Err(e) = router.cluster_redirection(&kind, slot, server).await { - delay = next_reconnection_delay(inner).map_err(|_| e)?; + let result = router + .replicas + .add_connection(inner, primary.clone(), replica.clone(), true) + .await; + + if let Err(err) = result { + let delay = match next_reconnection_delay(inner) { + Ok(dur) => dur, + Err(_) => return Err(err), + }; - continue; + read_and_sleep(inner, router, delay).await?; } else { break; } } + inner.reset_reconnection_attempts(); Ok(()) } -/// Repeatedly try to send `ASKING` to the provided server, reconnecting as needed.f -/// -/// Errors from this function should end the connection task. +/// Send `ASKING` to the provided server, reconnecting as needed. pub async fn send_asking_with_policy( - inner: &RefCount, + inner: &RefCount, router: &mut Router, server: &Server, slot: u16, -) -> Result<(), RedisError> { - let mut delay = inner.connection.cluster_cache_update_delay; + mut attempts_remaining: u32, +) -> Result<(), Error> { + macro_rules! 
next_sleep { + ($err:expr) => {{ + let delay = match next_reconnection_delay(inner) { + Ok(delay) => delay, + Err(_) => { + return Err( + $err.unwrap_or_else(|| Error::new(ErrorKind::Routing, "Unable to route command or reconnect.")), + ); + }, + }; + let _ = read_and_sleep(inner, router, delay).await; + continue; + }}; + } loop { - if !delay.is_zero() { - _debug!(inner, "Sleeping for {} ms.", delay.as_millis()); - inner.wait_with_interrupt(delay).await?; - } + let mut command = Command::new_asking(slot); + command.cluster_node = Some(server.clone()); + command.hasher = ClusterHash::Custom(slot); - if !router.connections.has_server_connection(server) { - if let Err(e) = router.sync_cluster().await { - _debug!(inner, "Error syncing cluster before ASKING: {:?}", e); - delay = utils::next_reconnection_delay(inner)?; - continue; - } + if attempts_remaining == 0 { + return Err(Error::new(ErrorKind::Routing, "Max attempts reached.")); } + attempts_remaining -= 1; - let mut command = RedisCommand::new_asking(slot); - let (tx, rx) = oneshot_channel(); - command.skip_backpressure = true; - command.response = ResponseKind::Respond(Some(tx)); - - let result = match router.write_direct(command, server).await { - Written::Error((error, _)) => Err(error), - Written::Disconnected((_, _, error)) => Err(error), - Written::NotFound(_) => Err(RedisError::new(RedisErrorKind::Cluster, "Connection not found.")), - _ => Ok(()), + let conn = match router.route(&command) { + Some(conn) => conn, + None => next_sleep!(None), }; - - if let Err(error) = result { - if error.should_not_reconnect() { - break; - } else if let Err(_) = reconnect_once(inner, router).await { - delay = utils::next_reconnection_delay(inner)?; - continue; - } else { - delay = Duration::from_millis(0); - continue; - } + let frame = protocol_utils::encode_frame(inner, &command)?; + if let Err(err) = conn.write(frame, true, false).await { + next_sleep!(Some(err)); + } + if let Err(err) = conn.flush().await { + 
next_sleep!(Some(err)); + } + if let Err(err) = conn.read_skip_pubsub(inner).await { + next_sleep!(Some(err)); } else { - match client_utils::timeout(rx, inner.internal_command_timeout()).await { - Ok(Err(e)) => { - // error writing the command - _debug!(inner, "Reconnect once after error from ASKING: {:?}", e); - if let Err(_) = reconnect_once(inner, router).await { - delay = utils::next_reconnection_delay(inner)?; - continue; - } else { - delay = Duration::from_millis(0); - continue; - } - }, - Err(e) => { - // command was dropped due to connection closing - _debug!(inner, "Reconnect once after rx error from ASKING: {:?}", e); - if let Err(_) = reconnect_once(inner, router).await { - delay = utils::next_reconnection_delay(inner)?; - continue; - } else { - delay = Duration::from_millis(0); - continue; - } - }, - _ => break, - } + break; } } @@ -417,35 +493,31 @@ pub async fn send_asking_with_policy( } #[cfg(feature = "replicas")] -async fn sync_cluster_replicas( - inner: &RefCount, - router: &mut Router, - reset: bool, -) -> Result<(), RedisError> { +async fn sync_cluster_replicas(inner: &RefCount, router: &mut Router, reset: bool) -> Result<(), Error> { if reset { router.replicas.clear_connections(inner).await?; } if inner.config.server.is_clustered() { - router.sync_cluster().await + router.sync_cluster(inner).await } else { - router.sync_replicas().await + router.sync_replicas(inner).await } } /// Repeatedly try to sync the cluster state, reconnecting as needed until the max reconnection attempts is reached. 
#[cfg(feature = "replicas")] pub async fn sync_replicas_with_policy( - inner: &RefCount, + inner: &RefCount, router: &mut Router, reset: bool, -) -> Result<(), RedisError> { +) -> Result<(), Error> { let mut delay = Duration::from_millis(0); loop { if !delay.is_zero() { _debug!(inner, "Sleeping for {} ms.", delay.as_millis()); - inner.wait_with_interrupt(delay).await?; + read_and_sleep(inner, router, delay).await?; } if let Err(e) = sync_cluster_replicas(inner, router, reset).await { @@ -455,7 +527,7 @@ pub async fn sync_replicas_with_policy( break; } else { // return the underlying error on the last attempt - delay = match utils::next_reconnection_delay(inner) { + delay = match next_reconnection_delay(inner) { Ok(delay) => delay, Err(_) => return Err(e), }; @@ -471,11 +543,9 @@ pub async fn sync_replicas_with_policy( } /// Wait for `inner.connection.cluster_cache_update_delay`. -pub async fn delay_cluster_sync(inner: &RefCount) -> Result<(), RedisError> { +pub async fn delay_cluster_sync(inner: &RefCount, router: &mut Router) -> Result<(), Error> { if inner.config.server.is_clustered() && !inner.connection.cluster_cache_update_delay.is_zero() { - inner - .wait_with_interrupt(inner.connection.cluster_cache_update_delay) - .await + read_and_sleep(inner, router, inner.connection.cluster_cache_update_delay).await } else { Ok(()) } @@ -484,26 +554,23 @@ pub async fn delay_cluster_sync(inner: &RefCount) -> Result<() /// Repeatedly try to sync the cluster state, reconnecting as needed until the max reconnection attempts is reached. /// /// Errors from this function should end the connection task. 
-pub async fn sync_cluster_with_policy( - inner: &RefCount, - router: &mut Router, -) -> Result<(), RedisError> { +pub async fn sync_cluster_with_policy(inner: &RefCount, router: &mut Router) -> Result<(), Error> { let mut delay = Duration::from_millis(0); loop { if !delay.is_zero() { _debug!(inner, "Sleeping for {} ms.", delay.as_millis()); - inner.wait_with_interrupt(delay).await?; + read_and_sleep(inner, router, delay).await?; } - if let Err(e) = router.sync_cluster().await { + if let Err(e) = router.sync_cluster(inner).await { _warn!(inner, "Error syncing cluster after redirect: {:?}", e); if e.should_not_reconnect() { break; } else { // return the underlying error on the last attempt - delay = match utils::next_reconnection_delay(inner) { + delay = match next_reconnection_delay(inner) { Ok(delay) => delay, Err(_) => return Err(e), }; @@ -517,91 +584,3 @@ pub async fn sync_cluster_with_policy( Ok(()) } - -pub fn defer_reconnect(inner: &RefCount) { - if inner.config.server.is_clustered() { - let (tx, _) = oneshot_channel(); - let cmd = RouterCommand::SyncCluster { tx }; - if let Err(_) = interfaces::send_to_router(inner, cmd) { - _warn!(inner, "Failed to send deferred cluster sync.") - } - } else { - let cmd = RouterCommand::Reconnect { - server: None, - tx: None, - force: false, - #[cfg(feature = "replicas")] - replica: false, - }; - if let Err(_) = interfaces::send_to_router(inner, cmd) { - _warn!(inner, "Failed to send deferred cluster sync.") - } - } -} - -/// Attempt to read the next frame from the reader half of a connection. -pub async fn next_frame( - inner: &RefCount, - conn: &mut SplitStreamKind, - server: &Server, - buffer: &SharedBuffer, -) -> Result, RedisError> { - if let Some(ref max_resp_latency) = inner.connection.unresponsive.max_timeout { - // These shenanigans were implemented in an attempt to strike a balance between a few recent changes. 
- // - // The entire request-response path can be lock-free if we use crossbeam-queue types under the shared buffer - // between socket halves, but these types do not support `peek` or `push_front`. Unfortunately this really limits - // or prevents most forms of conditional `pop_front` use cases. There are 3-4 places in the code where this - // matters, and this is one of them. - // - // The `UnresponsiveConfig` interface implements a heuristic where callers can express that a connection should be - // considered unresponsive if a command waits too long for a response. Before switching to crossbeam types we used - // a `Mutex` container which made this scenario easier to implement, but with crossbeam types it's more - // complicated. - // - // The approach here implements a ~~hack~~ heuristic where we measure the time since first noticing a new - // frame in the shared buffer from the reader task's perspective. This only works because we use `Stream::next` - // which is noted to be cancellation-safe in the tokio::select! docs. With this implementation the worst case - // error margin is an extra `interval`. - - let mut last_frame_sent: Option = None; - 'outer: loop { - tokio::select! { - // prefer polling the connection first - biased; - frame = conn.try_next() => return frame, - - // repeatedly check the duration since we first noticed a pending frame - _ = sleep(inner.connection.unresponsive.interval) => { - _trace!(inner, "Checking unresponsive connection to {}", server); - - // continue early if the buffer is empty or we're waiting on a blocking command. this isn't ideal, but - // this strategy just doesn't work well with blocking commands. 
- let buffer_len = buffer.len(); - if buffer_len == 0 || buffer.is_blocked() { - last_frame_sent = None; - continue 'outer; - } else if buffer_len > 0 && last_frame_sent.is_none() { - _trace!(inner, "Observed new request frame in unresponsive loop"); - last_frame_sent = Some(Instant::now()); - } - - if let Some(ref last_frame_sent) = last_frame_sent { - let latency = Instant::now().saturating_duration_since(*last_frame_sent); - if latency > *max_resp_latency { - _warn!(inner, "Unresponsive connection to {} after {:?}", server, latency); - inner.notifications.broadcast_unresponsive(server.clone()); - return Err(RedisError::new(RedisErrorKind::IO, "Unresponsive connection.")) - } - } - }, - } - } - } else { - _trace!(inner, "Skip waiting on interrupt rx."); - conn.try_next().await - } -} - -#[cfg(test)] -mod tests {} diff --git a/src/runtime/_tokio.rs b/src/runtime/_tokio.rs index afe6c71f..bdf5236d 100644 --- a/src/runtime/_tokio.rs +++ b/src/runtime/_tokio.rs @@ -1,50 +1,152 @@ use crate::{ clients::WithOptions, commands, - error::RedisError, - interfaces::{default_send_command, RedisResult}, - modules::inner::RedisClientInner, - protocol::command::RedisCommand, + error::Error, + interfaces::{default_send_command, FredResult}, + modules::inner::ClientInner, + protocol::command::Command, router::commands as router_commands, types::{ + config::{Config, ConnectionConfig, Options, PerformanceConfig, ReconnectPolicy, Server}, ClientState, ConnectHandle, - ConnectionConfig, CustomCommand, - FromRedis, + FromValue, InfoKind, - Options, - PerformanceConfig, - ReconnectPolicy, - RedisConfig, - RedisValue, Resp3Frame, RespVersion, - Server, + Value, Version, }, utils, }; use arc_swap::ArcSwapAny; -use futures::Stream; +use futures::{Stream, StreamExt}; use std::{future::Future, sync::Arc}; -use tokio::sync::broadcast::{Receiver, Sender}; +use tokio::sync::mpsc::{ + channel as bounded_channel, + error::{TryRecvError, TrySendError}, + unbounded_channel, + Receiver as 
BoundedReceiver, + Sender as BoundedSender, + UnboundedReceiver, + UnboundedSender, +}; pub use tokio::{ spawn, sync::{ - broadcast::{self, error::SendError as BroadcastSendError}, - mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, + broadcast::{ + self, + error::SendError as BroadcastSendError, + Receiver as BroadcastReceiver, + Sender as BroadcastSender, + }, oneshot::{channel as oneshot_channel, Receiver as OneshotReceiver, Sender as OneshotSender}, RwLock as AsyncRwLock, }, task::JoinHandle, time::sleep, }; -use tokio_stream::wrappers::UnboundedReceiverStream; +use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream}; + +enum SenderKind { + Bounded(BoundedSender), + Unbounded(UnboundedSender), +} + +impl Clone for SenderKind { + fn clone(&self) -> Self { + match self { + SenderKind::Bounded(tx) => SenderKind::Bounded(tx.clone()), + SenderKind::Unbounded(tx) => SenderKind::Unbounded(tx.clone()), + } + } +} + +pub struct Sender { + tx: SenderKind, +} + +impl Clone for Sender { + fn clone(&self) -> Self { + Sender { tx: self.tx.clone() } + } +} + +impl Sender { + pub async fn send(&self, val: T) -> Result<(), T> { + match self.tx { + SenderKind::Bounded(ref tx) => tx.send(val).await.map_err(|e| e.0), + SenderKind::Unbounded(ref tx) => tx.send(val).map_err(|e| e.0), + } + } + + pub fn try_send(&self, val: T) -> Result<(), TrySendError> { + match self.tx { + SenderKind::Bounded(ref tx) => tx.try_send(val), + SenderKind::Unbounded(ref tx) => tx.send(val).map_err(|e| TrySendError::Closed(e.0)), + } + } +} + +enum ReceiverKind { + Bounded(BoundedReceiver), + Unbounded(UnboundedReceiver), +} + +pub struct Receiver { + rx: ReceiverKind, +} + +impl Receiver { + pub async fn recv(&mut self) -> Option { + match self.rx { + ReceiverKind::Bounded(ref mut tx) => tx.recv().await, + ReceiverKind::Unbounded(ref mut tx) => tx.recv().await, + } + } + + pub fn try_recv(&mut self) -> Result { + match self.rx { + ReceiverKind::Bounded(ref mut tx) => 
tx.try_recv(), + ReceiverKind::Unbounded(ref mut tx) => tx.try_recv(), + } + } + + pub fn into_stream(self) -> impl Stream + 'static { + match self.rx { + ReceiverKind::Bounded(tx) => ReceiverStream::new(tx).boxed(), + ReceiverKind::Unbounded(tx) => UnboundedReceiverStream::new(tx).boxed(), + } + } +} + +pub fn channel(size: usize) -> (Sender, Receiver) { + if size == 0 { + let (tx, rx) = unbounded_channel(); + ( + Sender { + tx: SenderKind::Unbounded(tx), + }, + Receiver { + rx: ReceiverKind::Unbounded(rx), + }, + ) + } else { + let (tx, rx) = bounded_channel(size); + ( + Sender { + tx: SenderKind::Bounded(tx), + }, + Receiver { + rx: ReceiverKind::Bounded(rx), + }, + ) + } +} #[cfg(any(feature = "dns", feature = "trust-dns-resolver"))] use crate::protocol::types::Resolve; - #[cfg(feature = "i-server")] use crate::types::ShutdownFlags; @@ -58,8 +160,6 @@ pub type AtomicUsize = std::sync::atomic::AtomicUsize; pub type Mutex = parking_lot::Mutex; pub type RwLock = parking_lot::RwLock; pub type RefSwap = ArcSwapAny; -pub type BroadcastSender = Sender; -pub type BroadcastReceiver = Receiver; pub fn broadcast_send(tx: &BroadcastSender, msg: &T, func: F) { if let Err(BroadcastSendError(val)) = tx.send(msg.clone()) { @@ -71,26 +171,22 @@ pub fn broadcast_channel(capacity: usize) -> (BroadcastSender, Broa broadcast::channel(capacity) } -pub fn rx_stream(rx: UnboundedReceiver) -> impl Stream { - UnboundedReceiverStream::new(rx) -} - -/// Any Redis client that implements any part of the Redis interface. +/// Any client that implements any part of the server interface. pub trait ClientLike: Clone + Send + Sync + Sized { #[doc(hidden)] - fn inner(&self) -> &Arc; + fn inner(&self) -> &Arc; /// Helper function to intercept and modify a command without affecting how it is sent to the connection layer. 
#[doc(hidden)] - fn change_command(&self, _: &mut RedisCommand) {} + fn change_command(&self, _: &mut Command) {} /// Helper function to intercept and customize how a command is sent to the connection layer. #[doc(hidden)] - fn send_command(&self, command: C) -> Result<(), RedisError> + fn send_command(&self, command: C) -> Result<(), Error> where - C: Into, + C: Into, { - let mut command: RedisCommand = command.into(); + let mut command: Command = command.into(); self.change_command(&mut command); default_send_command(self.inner(), command) } @@ -101,7 +197,7 @@ pub trait ClientLike: Clone + Send + Sync + Sized { } /// Read the config used to initialize the client. - fn client_config(&self) -> RedisConfig { + fn client_config(&self) -> Config { self.inner().config.as_ref().clone() } @@ -129,11 +225,6 @@ pub trait ClientLike: Clone + Send + Sync + Sized { self.inner().policy.read().is_some() } - /// Whether the client will automatically pipeline commands. - fn is_pipelined(&self) -> bool { - self.inner().is_pipelined() - } - /// Whether the client is connected to a cluster. fn is_clustered(&self) -> bool { self.inner().config.server.is_clustered() @@ -144,12 +235,12 @@ pub trait ClientLike: Clone + Send + Sync + Sized { self.inner().config.server.is_sentinel() } - /// Update the internal [PerformanceConfig](crate::types::PerformanceConfig) in place with new values. + /// Update the internal [PerformanceConfig](crate::types::config::PerformanceConfig) in place with new values. fn update_perf_config(&self, config: PerformanceConfig) { self.inner().update_performance_config(config); } - /// Read the [PerformanceConfig](crate::types::PerformanceConfig) associated with this client. + /// Read the [PerformanceConfig](crate::types::config::PerformanceConfig) associated with this client. 
fn perf_config(&self) -> PerformanceConfig { self.inner().performance_config() } @@ -167,9 +258,8 @@ pub trait ClientLike: Clone + Send + Sync + Sized { } /// Read the set of active connections managed by the client. - // TODO make this sync in the next major version - fn active_connections(&self) -> impl Future, RedisError>> + Send { - async { Ok(self.inner().active_connections()) } + fn active_connections(&self) -> Vec { + self.inner().active_connections() } /// Read the server version, if known. @@ -201,7 +291,7 @@ pub trait ClientLike: Clone + Send + Sync + Sized { utils::reset_router_task(&inner); tokio::spawn(async move { - utils::clear_backchannel_state(&inner).await; + inner.backchannel.clear_router_state(&inner).await; let result = router_commands::start(&inner).await; // a canceled error means we intentionally closed the client _trace!(inner, "Ending connection task with {:?}", result); @@ -212,7 +302,7 @@ pub trait ClientLike: Clone + Send + Sync + Sized { } } - utils::check_and_set_client_state(&inner.state, ClientState::Disconnecting, ClientState::Disconnected); + inner.cas_client_state(ClientState::Disconnecting, ClientState::Disconnected); result }) } @@ -220,7 +310,7 @@ pub trait ClientLike: Clone + Send + Sync + Sized { /// Force a reconnection to the server(s). /// /// When running against a cluster this function will also refresh the cached cluster routing table. - fn force_reconnection(&self) -> impl Future> + Send { + fn force_reconnection(&self) -> impl Future> + Send { async move { commands::server::force_reconnection(self.inner()).await } } @@ -228,7 +318,7 @@ pub trait ClientLike: Clone + Send + Sync + Sized { /// /// This can be used with `on_reconnect` to separate initialization logic that needs to occur only on the next /// connection attempt vs all subsequent attempts. 
- fn wait_for_connect(&self) -> impl Future> + Send { + fn wait_for_connect(&self) -> impl Future> + Send { async move { if utils::read_locked(&self.inner().state) == ClientState::Connected { debug!("{}: Client is already connected.", self.inner().id); @@ -252,8 +342,8 @@ pub trait ClientLike: Clone + Send + Sync + Sized { /// use fred::prelude::*; /// /// #[tokio::main] - /// async fn main() -> Result<(), RedisError> { - /// let client = RedisClient::default(); + /// async fn main() -> Result<(), Error> { + /// let client = Client::default(); /// let connection_task = client.init().await?; /// /// // ... @@ -262,11 +352,11 @@ pub trait ClientLike: Clone + Send + Sync + Sized { /// connection_task.await? /// } /// ``` - fn init(&self) -> impl Future> + Send { + fn init(&self) -> impl Future> + Send { async move { let mut rx = { self.inner().notifications.connect.load().subscribe() }; let task = self.connect(); - let error = rx.recv().await.map_err(RedisError::from).and_then(|r| r).err(); + let error = rx.recv().await.map_err(Error::from).and_then(|r| r).err(); if let Some(error) = error { // the initial connection failed, so we should gracefully close the routing task @@ -278,12 +368,13 @@ pub trait ClientLike: Clone + Send + Sync + Sized { } } - /// Close the connection to the Redis server. The returned future resolves when the command has been written to the + /// Close the connection to the server. The returned future resolves when the command has been written to the /// socket, not when the connection has been fully closed. Some time after this future resolves the future /// returned by [connect](Self::connect) will resolve which indicates that the connection has been fully closed. /// - /// This function will also close all error, pubsub message, and reconnection event streams. 
- fn quit(&self) -> impl Future> + Send { + /// This function will wait for pending commands to finish, and will also close all error, pubsub message, and + /// reconnection event streams. + fn quit(&self) -> impl Future> + Send { async move { commands::server::quit(self).await } } @@ -292,42 +383,42 @@ pub trait ClientLike: Clone + Send + Sync + Sized { /// #[cfg(feature = "i-server")] #[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] - fn shutdown(&self, flags: Option) -> impl Future> + Send { + fn shutdown(&self, flags: Option) -> impl Future> + Send { async move { commands::server::shutdown(self, flags).await } } /// Delete the keys in all databases. /// /// - fn flushall(&self, r#async: bool) -> impl Future> + Send + fn flushall(&self, r#async: bool) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::server::flushall(self, r#async).await?.convert() } } - /// Delete the keys on all nodes in the cluster. This is a special function that does not map directly to the Redis + /// Delete the keys on all nodes in the cluster. This is a special function that does not map directly to the server /// interface. - fn flushall_cluster(&self) -> impl Future> + Send { + fn flushall_cluster(&self) -> impl Future> + Send { async move { commands::server::flushall_cluster(self).await } } - /// Ping the Redis server. + /// Ping the server. /// /// - fn ping(&self) -> impl Future> + Send + fn ping(&self, message: Option) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { - async move { commands::server::ping(self).await?.convert() } + async move { commands::server::ping(self, message).await?.convert() } } /// Read info about the server. 
/// /// - fn info(&self, section: Option) -> impl Future> + Send + fn info(&self, section: Option) -> impl Future> + Send where - R: FromRedis, + R: FromValue, { async move { commands::server::info(self, section).await?.convert() } } @@ -340,11 +431,11 @@ pub trait ClientLike: Clone + Send + Sync + Sized { /// /// This interface should be used with caution as it may break the automatic pipeline features in the client if /// command flags are not properly configured. - fn custom(&self, cmd: CustomCommand, args: Vec) -> impl Future> + Send + fn custom(&self, cmd: CustomCommand, args: Vec) -> impl Future> + Send where - R: FromRedis, - T: TryInto + Send, - T::Error: Into + Send, + R: FromValue, + T: TryInto + Send, + T::Error: Into + Send, { async move { let args = utils::try_into_vec(args)?; @@ -356,10 +447,10 @@ pub trait ClientLike: Clone + Send + Sync + Sized { /// parsing. /// /// Note: RESP2 frames from the server are automatically converted to the RESP3 format when parsed by the client. 
- fn custom_raw(&self, cmd: CustomCommand, args: Vec) -> impl Future> + Send + fn custom_raw(&self, cmd: CustomCommand, args: Vec) -> impl Future> + Send where - T: TryInto + Send, - T::Error: Into + Send, + T: TryInto + Send, + T::Error: Into + Send, { async move { let args = utils::try_into_vec(args)?; @@ -376,16 +467,17 @@ pub trait ClientLike: Clone + Send + Sync + Sized { } } -pub fn spawn_event_listener(mut rx: BroadcastReceiver, func: F) -> JoinHandle> +pub fn spawn_event_listener(mut rx: BroadcastReceiver, func: F) -> JoinHandle> where T: Clone + Send + 'static, - F: Fn(T) -> RedisResult<()> + Send + 'static, + Fut: Future> + Send + 'static, + F: Fn(T) -> Fut + Send + 'static, { tokio::spawn(async move { let mut result = Ok(()); while let Ok(val) = rx.recv().await { - if let Err(err) = func(val) { + if let Err(err) = func(val).await { result = Err(err); break; } diff --git a/src/runtime/glommio/broadcast.rs b/src/runtime/glommio/broadcast.rs index 93424103..cf7304ec 100644 --- a/src/runtime/glommio/broadcast.rs +++ b/src/runtime/glommio/broadcast.rs @@ -1,4 +1,4 @@ -use crate::error::RedisError; +use crate::error::Error; use glommio::{ channels::local_channel::{new_unbounded, LocalReceiver, LocalSender}, GlommioError, @@ -24,10 +24,10 @@ impl BroadcastReceiver { /// Receives data from this channel. /// /// See [recv](glommio::channels::local_channel::LocalReceiver::recv) for more information. 
- pub async fn recv(&self) -> Result { + pub async fn recv(&self) -> Result { match self.rx.recv().await { Some(v) => Ok(v), - None => Err(RedisError::new_canceled()), + None => Err(Error::new_canceled()), } } } diff --git a/src/runtime/glommio/interfaces.rs b/src/runtime/glommio/interfaces.rs index a213e4d1..511a1b6c 100644 --- a/src/runtime/glommio/interfaces.rs +++ b/src/runtime/glommio/interfaces.rs @@ -3,26 +3,21 @@ use crate::types::ShutdownFlags; use crate::{ clients::WithOptions, commands, - error::RedisError, - interfaces::{RedisResult, Resp3Frame}, - modules::inner::RedisClientInner, + error::Error, + interfaces::{FredResult, Resp3Frame}, + modules::inner::ClientInner, prelude::default_send_command, - protocol::command::RedisCommand, + protocol::command::Command, router::commands as router_commands, runtime::{glommio::compat::spawn_into, spawn, BroadcastReceiver, JoinHandle, RefCount}, types::{ + config::{Config, ConnectionConfig, Options, PerformanceConfig, ReconnectPolicy, Server}, ClientState, ConnectHandle, - ConnectionConfig, CustomCommand, - FromRedis, + FromValue, InfoKind, - Options, - PerformanceConfig, - ReconnectPolicy, - RedisConfig, - RedisValue, - Server, + Value, }, utils, }; @@ -35,19 +30,19 @@ use crate::protocol::types::Resolve; pub trait ClientLike: Clone + Sized { #[doc(hidden)] - fn inner(&self) -> &RefCount; + fn inner(&self) -> &RefCount; /// Helper function to intercept and modify a command without affecting how it is sent to the connection layer. #[doc(hidden)] - fn change_command(&self, _: &mut RedisCommand) {} + fn change_command(&self, _: &mut Command) {} /// Helper function to intercept and customize how a command is sent to the connection layer. 
#[doc(hidden)] - fn send_command(&self, command: C) -> Result<(), RedisError> + fn send_command(&self, command: C) -> Result<(), Error> where - C: Into, + C: Into, { - let mut command: RedisCommand = command.into(); + let mut command: Command = command.into(); self.change_command(&mut command); default_send_command(self.inner(), command) } @@ -58,7 +53,7 @@ pub trait ClientLike: Clone + Sized { } /// Read the config used to initialize the client. - fn client_config(&self) -> RedisConfig { + fn client_config(&self) -> Config { self.inner().config.as_ref().clone() } @@ -86,11 +81,6 @@ pub trait ClientLike: Clone + Sized { self.inner().policy.read().is_some() } - /// Whether the client will automatically pipeline commands. - fn is_pipelined(&self) -> bool { - self.inner().is_pipelined() - } - /// Whether the client is connected to a cluster. fn is_clustered(&self) -> bool { self.inner().config.server.is_clustered() @@ -101,12 +91,12 @@ pub trait ClientLike: Clone + Sized { self.inner().config.server.is_sentinel() } - /// Update the internal [PerformanceConfig](crate::types::PerformanceConfig) in place with new values. + /// Update the internal [PerformanceConfig](crate::types::config::PerformanceConfig) in place with new values. fn update_perf_config(&self, config: PerformanceConfig) { self.inner().update_performance_config(config); } - /// Read the [PerformanceConfig](crate::types::PerformanceConfig) associated with this client. + /// Read the [PerformanceConfig](crate::types::config::PerformanceConfig) associated with this client. fn perf_config(&self) -> PerformanceConfig { self.inner().performance_config() } @@ -124,8 +114,8 @@ pub trait ClientLike: Clone + Sized { } /// Read the set of active connections managed by the client. - fn active_connections(&self) -> impl Future, RedisError>> { - async { Ok(self.inner().active_connections()) } + fn active_connections(&self) -> Vec { + self.inner().active_connections() } /// Read the server version, if known. 
@@ -158,7 +148,7 @@ pub trait ClientLike: Clone + Sized { utils::reset_router_task(&inner); let connection_ft = async move { - utils::clear_backchannel_state(&inner).await; + inner.backchannel.clear_router_state(&inner).await; let result = router_commands::start(&inner).await; // a canceled error means we intentionally closed the client _trace!(inner, "Ending connection task with {:?}", result); @@ -169,7 +159,7 @@ pub trait ClientLike: Clone + Sized { } } - utils::check_and_set_client_state(&inner.state, ClientState::Disconnecting, ClientState::Disconnected); + inner.cas_client_state(ClientState::Disconnecting, ClientState::Disconnected); result }; @@ -183,7 +173,7 @@ pub trait ClientLike: Clone + Sized { /// Force a reconnection to the server(s). /// /// When running against a cluster this function will also refresh the cached cluster routing table. - fn force_reconnection(&self) -> impl Future> { + fn force_reconnection(&self) -> impl Future> { async move { commands::server::force_reconnection(self.inner()).await } } @@ -191,7 +181,7 @@ pub trait ClientLike: Clone + Sized { /// /// This can be used with `on_reconnect` to separate initialization logic that needs to occur only on the next /// connection attempt vs all subsequent attempts. - fn wait_for_connect(&self) -> impl Future> { + fn wait_for_connect(&self) -> impl Future> { async move { if { utils::read_locked(&self.inner().state) } == ClientState::Connected { debug!("{}: Client is already connected.", self.inner().id); @@ -216,8 +206,8 @@ pub trait ClientLike: Clone + Sized { /// use fred::prelude::*; /// /// #[tokio::main] - /// async fn main() -> Result<(), RedisError> { - /// let client = RedisClient::default(); + /// async fn main() -> Result<(), Error> { + /// let client = Client::default(); /// let connection_task = client.init().await?; /// /// // ... @@ -226,11 +216,11 @@ pub trait ClientLike: Clone + Sized { /// connection_task.await? 
/// } /// ``` - fn init(&self) -> impl Future> { + fn init(&self) -> impl Future> { async move { let rx = { self.inner().notifications.connect.load().subscribe() }; let task = self.connect(); - let error = rx.recv().await.map_err(RedisError::from).and_then(|r| r).err(); + let error = rx.recv().await.map_err(Error::from).and_then(|r| r).err(); if let Some(error) = error { // the initial connection failed, so we should gracefully close the routing task @@ -242,12 +232,12 @@ pub trait ClientLike: Clone + Sized { } } - /// Close the connection to the Redis server. The returned future resolves when the command has been written to the + /// Close the connection to the server. The returned future resolves when the command has been written to the /// socket, not when the connection has been fully closed. Some time after this future resolves the future /// returned by [connect](Self::connect) will resolve which indicates that the connection has been fully closed. /// /// This function will also close all error, pubsub message, and reconnection event streams. - fn quit(&self) -> impl Future> { + fn quit(&self) -> impl Future> { async move { commands::server::quit(self).await } } @@ -256,42 +246,42 @@ pub trait ClientLike: Clone + Sized { /// #[cfg(feature = "i-server")] #[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] - fn shutdown(&self, flags: Option) -> impl Future> { + fn shutdown(&self, flags: Option) -> impl Future> { async move { commands::server::shutdown(self, flags).await } } /// Delete the keys in all databases. /// /// - fn flushall(&self, r#async: bool) -> impl Future> + fn flushall(&self, r#async: bool) -> impl Future> where - R: FromRedis, + R: FromValue, { async move { commands::server::flushall(self, r#async).await?.convert() } } - /// Delete the keys on all nodes in the cluster. This is a special function that does not map directly to the Redis + /// Delete the keys on all nodes in the cluster. 
This is a special function that does not map directly to the server /// interface. - fn flushall_cluster(&self) -> impl Future> { + fn flushall_cluster(&self) -> impl Future> { async move { commands::server::flushall_cluster(self).await } } - /// Ping the Redis server. + /// Ping the server. /// /// - fn ping(&self) -> impl Future> + fn ping(&self, message: Option) -> impl Future> where - R: FromRedis, + R: FromValue, { - async move { commands::server::ping(self).await?.convert() } + async move { commands::server::ping(self, message).await?.convert() } } /// Read info about the server. /// /// - fn info(&self, section: Option) -> impl Future> + fn info(&self, section: Option) -> impl Future> where - R: FromRedis, + R: FromValue, { async move { commands::server::info(self, section).await?.convert() } } @@ -304,11 +294,11 @@ pub trait ClientLike: Clone + Sized { /// /// This interface should be used with caution as it may break the automatic pipeline features in the client if /// command flags are not properly configured. - fn custom(&self, cmd: CustomCommand, args: Vec) -> impl Future> + fn custom(&self, cmd: CustomCommand, args: Vec) -> impl Future> where - R: FromRedis, - T: TryInto, - T::Error: Into, + R: FromValue, + T: TryInto, + T::Error: Into, { async move { let args = utils::try_into_vec(args)?; @@ -320,10 +310,10 @@ pub trait ClientLike: Clone + Sized { /// parsing. /// /// Note: RESP2 frames from the server are automatically converted to the RESP3 format when parsed by the client. 
- fn custom_raw(&self, cmd: CustomCommand, args: Vec) -> impl Future> + fn custom_raw(&self, cmd: CustomCommand, args: Vec) -> impl Future> where - T: TryInto, - T::Error: Into, + T: TryInto, + T::Error: Into, { async move { let args = utils::try_into_vec(args)?; @@ -340,16 +330,17 @@ pub trait ClientLike: Clone + Sized { } } -pub fn spawn_event_listener(rx: BroadcastReceiver, func: F) -> JoinHandle> +pub(crate) fn spawn_event_listener(rx: BroadcastReceiver, func: F) -> JoinHandle> where T: Clone + 'static, - F: Fn(T) -> RedisResult<()> + 'static, + Fut: Future> + 'static, + F: Fn(T) -> Fut + 'static, { spawn(async move { let mut result = Ok(()); while let Ok(val) = rx.recv().await { - if let Err(err) = func(val) { + if let Err(err) = func(val).await { result = Err(err); break; } diff --git a/src/runtime/glommio/io_compat.rs b/src/runtime/glommio/io_compat.rs index c03d1e2a..5068f515 100644 --- a/src/runtime/glommio/io_compat.rs +++ b/src/runtime/glommio/io_compat.rs @@ -1,17 +1,17 @@ -/// Reuse the same approach used by gmf (https://github.com/EtaCassiopeia/gmf/blob/591037476e6a17f83954a20558ff0e1920d94301/gmf/src/server/tokio_interop.rs#L1). -/// -/// The `Framed` codec interface used by the `Connection` struct requires that `T: AsyncRead+AsyncWrite`. -/// These traits are defined in the tokio and futures_io/futures_lite crates, but the tokio_util::codec interface -/// uses the versions re-implemented in tokio. However, glommio's network interfaces implement -/// `AsyncRead+AsyncWrite` from the futures_io crate. There are several ways to work around this, including -/// either a re-implementation of the codec traits `Encoder+Decoder`, or a compatibility layer for the different -/// versions of `AsyncRead+AsyncWrite`. The `gmf` project used the second approach, which seems much easier than -/// re-implementing the `Framed` traits (https://github.com/tokio-rs/tokio/blob/1ac8dff213937088616dc84de9adc92b4b68c49a/tokio-util/src/codec/framed_impl.rs#L125). +//! 
Reuse the same approach used by gmf (https://github.com/EtaCassiopeia/gmf/blob/591037476e6a17f83954a20558ff0e1920d94301/gmf/src/server/tokio_interop.rs#L1). +//! +//! The `Framed` codec interface used by the `Connection` struct requires that `T: AsyncRead+AsyncWrite`. +//! These traits are defined in the tokio and futures_io/futures_lite crates, but the tokio_util::codec interface +//! uses the versions re-implemented in tokio. However, glommio's network interfaces implement +//! `AsyncRead+AsyncWrite` from the futures_io crate. There are several ways to work around this, including +//! either a re-implementation of the codec traits `Encoder+Decoder`, or a compatibility layer for the different +//! versions of `AsyncRead+AsyncWrite`. The `gmf` project used the second approach, which seems much easier than +//! re-implementing the `Framed` traits (https://github.com/tokio-rs/tokio/blob/1ac8dff213937088616dc84de9adc92b4b68c49a/tokio-util/src/codec/framed_impl.rs#L125). // ------------------- https://github.com/EtaCassiopeia/gmf/blob/591037476e6a17f83954a20558ff0e1920d94301/gmf/src/server/tokio_interop.rs -/// This module provides interoperability with the Tokio async runtime. -/// It contains utilities to bridge between futures_lite and Tokio. +/// This module provides interoperability with the Tokio async runtime. It contains utilities to bridge between +/// futures_lite and Tokio. use std::io::{self}; use std::{ pin::Pin, @@ -21,8 +21,8 @@ use std::{ use futures_io::{AsyncRead, AsyncWrite}; use tokio::io::ReadBuf; -/// A wrapper type for AsyncRead + AsyncWrite + Unpin types, providing -/// interoperability with Tokio's AsyncRead and AsyncWrite traits. +/// A wrapper type for AsyncRead + AsyncWrite + Unpin types, providing interoperability with Tokio's AsyncRead and +/// AsyncWrite traits. #[pin_project::pin_project] // This generates a projection for the inner type. 
pub struct TokioIO(#[pin] pub T) where diff --git a/src/runtime/glommio/mod.rs b/src/runtime/glommio/mod.rs index e33efe7e..6d52bfb3 100644 --- a/src/runtime/glommio/mod.rs +++ b/src/runtime/glommio/mod.rs @@ -9,16 +9,12 @@ pub(crate) mod mpsc; pub(crate) mod compat { pub use super::{ broadcast::{BroadcastReceiver, BroadcastSender}, - mpsc::{rx_stream, UnboundedReceiver, UnboundedSender}, + mpsc::{channel, Receiver, Sender}, }; - use crate::error::RedisError; + use crate::error::Error; use futures::Future; use glommio::TaskQueueHandle; - pub use glommio::{ - channels::local_channel::new_unbounded as unbounded_channel, - task::JoinHandle as GlommioJoinHandle, - timer::sleep, - }; + pub use glommio::{task::JoinHandle as GlommioJoinHandle, timer::sleep}; pub use oneshot::{channel as oneshot_channel, Receiver as OneshotReceiver, Sender as OneshotSender}; use std::{ cell::Cell, @@ -81,7 +77,7 @@ pub(crate) mod compat { // map from futures_lite::Future to std::future::Future impl Future for JoinHandle { - type Output = Result; + type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { use futures_lite::FutureExt; @@ -91,7 +87,7 @@ pub(crate) mod compat { .get_mut() .inner .poll(cx) - .map(|result| result.ok_or(RedisError::new_canceled())); + .map(|result| result.ok_or(Error::new_canceled())); if let Poll::Ready(_) = result { finished.replace(true); diff --git a/src/runtime/glommio/mpsc.rs b/src/runtime/glommio/mpsc.rs index 17b4122d..4030e24e 100644 --- a/src/runtime/glommio/mpsc.rs +++ b/src/runtime/glommio/mpsc.rs @@ -1,7 +1,6 @@ -use crate::error::{RedisError, RedisErrorKind}; use futures::Stream; use glommio::{ - channels::local_channel::{LocalReceiver, LocalSender}, + channels::local_channel::{new_bounded, new_unbounded, LocalReceiver, LocalSender}, GlommioError, }; use std::{ @@ -11,7 +10,15 @@ use std::{ task::{Context, Poll}, }; -pub type UnboundedReceiver = LocalReceiver; +pub fn channel(size: usize) -> (Sender, Receiver) { + if size 
== 0 { + let (tx, rx) = new_unbounded(); + (tx.into(), rx.into()) + } else { + let (tx, rx) = new_bounded(size); + (tx.into(), rx.into()) + } +} pub struct UnboundedReceiverStream { rx: LocalReceiver, @@ -35,49 +42,54 @@ impl Stream for UnboundedReceiverStream { fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { use futures_lite::stream::StreamExt; - - // TODO make sure this is cancellation-safe. it's a bit unclear why the internal impl of ChannelStream does what - // it does. self.rx.stream().poll_next(cx) } } -pub struct UnboundedSender { +pub struct Receiver { + rx: LocalReceiver, +} + +impl From> for Receiver { + fn from(rx: LocalReceiver) -> Self { + Receiver { rx } + } +} + +impl Receiver { + pub async fn recv(&mut self) -> Option { + self.rx.recv().await + } + + pub fn into_stream(self) -> impl Stream + 'static { + // what happens if we `join` the futures from `recv()` and `rx.stream().next()`? + UnboundedReceiverStream::from(self.rx) + } +} + +pub struct Sender { tx: Rc>, } // https://github.com/rust-lang/rust/issues/26925 -impl Clone for UnboundedSender { +impl Clone for Sender { fn clone(&self) -> Self { - UnboundedSender { tx: self.tx.clone() } + Sender { tx: self.tx.clone() } } } -impl From> for UnboundedSender { +impl From> for Sender { fn from(tx: LocalSender) -> Self { - UnboundedSender { tx: Rc::new(tx) } + Sender { tx: Rc::new(tx) } } } -impl UnboundedSender { +impl Sender { pub fn try_send(&self, msg: T) -> Result<(), GlommioError> { self.tx.try_send(msg) } - pub fn send(&self, msg: T) -> Result<(), RedisError> { - if let Err(_e) = self.tx.deref().try_send(msg) { - // shouldn't happen since we use unbounded channels - Err(RedisError::new( - RedisErrorKind::Canceled, - "Failed to send message on channel.", - )) - } else { - Ok(()) - } + pub async fn send(&self, msg: T) -> Result<(), GlommioError> { + self.tx.deref().send(msg).await } } - -pub fn rx_stream(rx: LocalReceiver) -> impl Stream + 'static { - // what happens if we `join` 
the futures from `recv()` and `rx.stream().next()`? - UnboundedReceiverStream::from(rx) -} diff --git a/src/trace/README.md b/src/trace/README.md index 354d0a20..6fa7e4f5 100644 --- a/src/trace/README.md +++ b/src/trace/README.md @@ -42,7 +42,6 @@ Tracing levels for the two tracing features can be configured separately through | cmd.req | The size (in bytes) of the command's arguments. | | cmd.res | The size (in bytes) of the command's response. | | cmd.args | The number of arguments being sent to the server. | -| cmd.pipelined | Whether the command was pipelined. | | cmd.flush | Whether the socket was flushed while sending the command. | | msg.channel | The channel on which a pubsub message was received. | | duration | The duration of a pause, in milliseconds, of a backpressure event. | \ No newline at end of file diff --git a/src/trace/disabled.rs b/src/trace/disabled.rs index c5af3b78..bd715487 100644 --- a/src/trace/disabled.rs +++ b/src/trace/disabled.rs @@ -1,9 +1,9 @@ #![allow(dead_code)] #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] -use crate::modules::inner::RedisClientInner; +use crate::modules::inner::ClientInner; #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] -use crate::protocol::command::RedisCommand; +use crate::protocol::command::Command; #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] use crate::runtime::RefCount; #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] @@ -23,12 +23,12 @@ impl Span { } #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] -pub fn set_network_span(_inner: &RefCount, _command: &mut RedisCommand, _flush: bool) {} +pub fn set_network_span(_inner: &RefCount, _command: &mut Command, _flush: bool) {} #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] -pub fn create_pubsub_span(_inner: &RefCount, _frame: &Frame) -> Option { +pub fn create_pubsub_span(_inner: &RefCount, _frame: &Frame) -> Option 
{ Some(Span {}) } #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] -pub fn backpressure_event(_cmd: &RedisCommand, _: Option) {} +pub fn backpressure_event(_cmd: &Command, _: Option) {} diff --git a/src/trace/enabled.rs b/src/trace/enabled.rs index 719c6d9e..89ee2305 100644 --- a/src/trace/enabled.rs +++ b/src/trace/enabled.rs @@ -1,4 +1,4 @@ -use crate::{modules::inner::RedisClientInner, protocol::command::RedisCommand, runtime::RefCount}; +use crate::{modules::inner::ClientInner, protocol::command::Command, runtime::RefCount}; use redis_protocol::resp3::types::{BytesFrame as Resp3Frame, Resp3Frame as _Resp3Frame}; use std::{fmt, ops::Deref}; pub use tracing::span::Span; @@ -42,7 +42,7 @@ impl fmt::Debug for CommandTraces { } } -pub fn set_network_span(inner: &RefCount, command: &mut RedisCommand, flush: bool) { +pub fn set_network_span(inner: &RefCount, command: &mut Command, flush: bool) { trace!("Setting network span from command {}", command.debug_id()); let span = fspan!(command, inner.tracing_span_level(), "fred.rtt", "cmd.flush" = flush); span.in_scope(|| {}); @@ -51,10 +51,10 @@ pub fn set_network_span(inner: &RefCount, command: &mut RedisC pub fn record_response_size(span: &Span, frame: &Resp3Frame) { #[allow(clippy::needless_borrows_for_generic_args)] - span.record("cmd.res", &frame.encode_len()); + span.record("cmd.res", &frame.encode_len(true)); } -pub fn create_command_span(inner: &RefCount) -> Span { +pub fn create_command_span(inner: &RefCount) -> Span { span_lvl!( inner.tracing_span_level(), "fred.command", @@ -67,28 +67,28 @@ pub fn create_command_span(inner: &RefCount) -> Span { } #[cfg(feature = "full-tracing")] -pub fn create_args_span(parent: Option, inner: &RefCount) -> Span { +pub fn create_args_span(parent: Option, inner: &RefCount) -> Span { span_lvl!(inner.full_tracing_span_level(), parent: parent, "fred.prepare", "cmd.args" = Empty) } #[cfg(not(feature = "full-tracing"))] -pub fn create_args_span(_parent: Option, 
_inner: &RefCount) -> FakeSpan { +pub fn create_args_span(_parent: Option, _inner: &RefCount) -> FakeSpan { FakeSpan {} } #[cfg(feature = "full-tracing")] -pub fn create_queued_span(parent: Option, inner: &RefCount) -> Span { +pub fn create_queued_span(parent: Option, inner: &RefCount) -> Span { let buf_len = inner.counters.read_cmd_buffer_len(); span_lvl!(inner.full_tracing_span_level(), parent: parent, "fred.queued", buf_len) } #[cfg(not(feature = "full-tracing"))] -pub fn create_queued_span(_parent: Option, _inner: &RefCount) -> FakeSpan { +pub fn create_queued_span(_parent: Option, _inner: &RefCount) -> FakeSpan { FakeSpan {} } #[cfg(feature = "full-tracing")] -pub fn create_pubsub_span(inner: &RefCount, frame: &Resp3Frame) -> Option { +pub fn create_pubsub_span(inner: &RefCount, frame: &Resp3Frame) -> Option { if inner.should_trace() { let span = span_lvl!( inner.full_tracing_span_level(), @@ -96,7 +96,7 @@ pub fn create_pubsub_span(inner: &RefCount, frame: &Resp3Frame "fred.pubsub", module = "fred", "client.id" = &inner.id.deref(), - "cmd.res" = &frame.encode_len(), + "cmd.res" = &frame.encode_len(true), "msg.channel" = Empty ); @@ -107,11 +107,11 @@ pub fn create_pubsub_span(inner: &RefCount, frame: &Resp3Frame } #[cfg(not(feature = "full-tracing"))] -pub fn create_pubsub_span(_inner: &RefCount, _frame: &Resp3Frame) -> Option { +pub fn create_pubsub_span(_inner: &RefCount, _frame: &Resp3Frame) -> Option { Some(FakeSpan {}) } -pub fn backpressure_event(cmd: &RedisCommand, duration: Option) { +pub fn backpressure_event(cmd: &Command, duration: Option) { let id = cmd.traces.cmd.as_ref().and_then(|c| c.id()); if let Some(duration) = duration { event!(parent: id, Level::INFO, "fred.backpressure duration={}", duration); diff --git a/src/types/args.rs b/src/types/args.rs index 1829227b..d207e1ff 100644 --- a/src/types/args.rs +++ b/src/types/args.rs @@ -1,22 +1,14 @@ -#[cfg(feature = "i-scripts")] -use crate::types::Function; -#[cfg(feature = "i-geo")] -use 
crate::types::{GeoPosition, GeoRadiusInfo}; -#[cfg(feature = "i-streams")] -use crate::types::{XReadResponse, XReadValue}; use crate::{ - error::{RedisError, RedisErrorKind}, + error::{Error, ErrorKind}, interfaces::{ClientLike, Resp3Frame}, protocol::{connection::OK, utils as protocol_utils}, - types::{FromRedis, FromRedisKey, Server, QUEUED}, + types::{config::Server, FromKey, FromValue, QUEUED}, utils, }; use bytes::Bytes; use bytes_utils::Str; use float_cmp::approx_eq; use redis_protocol::resp2::types::NULL; -#[cfg(feature = "serde-json")] -use serde_json::Value; use std::{ borrow::Cow, collections::{BTreeMap, HashMap, HashSet, VecDeque}, @@ -29,6 +21,13 @@ use std::{ str, }; +#[cfg(feature = "i-geo")] +use crate::types::geo::{GeoPosition, GeoRadiusInfo}; +#[cfg(feature = "i-scripts")] +use crate::types::scripts::Function; +#[cfg(feature = "i-streams")] +use crate::types::streams::{XReadResponse, XReadValue}; + static TRUE_STR: Str = utils::static_str("true"); static FALSE_STR: Str = utils::static_str("false"); @@ -42,11 +41,11 @@ macro_rules! impl_string_or_number( } ); -macro_rules! impl_from_str_for_redis_key( +macro_rules! 
impl_from_str_for_key( ($t:ty) => { - impl From<$t> for RedisKey { + impl From<$t> for Key { fn from(val: $t) -> Self { - RedisKey { key: val.to_string().into() } + Key { key: val.to_string().into() } } } } @@ -88,25 +87,25 @@ impl StringOrNumber { } #[cfg(feature = "i-streams")] - pub(crate) fn into_arg(self) -> RedisValue { + pub(crate) fn into_arg(self) -> Value { match self { - StringOrNumber::String(s) => RedisValue::String(s), - StringOrNumber::Number(n) => RedisValue::Integer(n), - StringOrNumber::Double(f) => RedisValue::Double(f), + StringOrNumber::String(s) => Value::String(s), + StringOrNumber::Number(n) => Value::Integer(n), + StringOrNumber::Double(f) => Value::Double(f), } } } -impl TryFrom for StringOrNumber { - type Error = RedisError; +impl TryFrom for StringOrNumber { + type Error = Error; - fn try_from(value: RedisValue) -> Result { + fn try_from(value: Value) -> Result { let val = match value { - RedisValue::String(s) => StringOrNumber::String(s), - RedisValue::Integer(i) => StringOrNumber::Number(i), - RedisValue::Double(f) => StringOrNumber::Double(f), - RedisValue::Bytes(b) => StringOrNumber::String(Str::from_inner(b)?), - _ => return Err(RedisError::new(RedisErrorKind::InvalidArgument, "")), + Value::String(s) => StringOrNumber::String(s), + Value::Integer(i) => StringOrNumber::Number(i), + Value::Double(f) => StringOrNumber::Double(f), + Value::Bytes(b) => StringOrNumber::String(Str::from_inner(b)?), + _ => return Err(Error::new(ErrorKind::InvalidArgument, "")), }; Ok(val) @@ -154,23 +153,23 @@ impl From for StringOrNumber { } } -/// A key in Redis. +/// A key identifying a [Value](crate::types::Value). #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct RedisKey { +pub struct Key { key: Bytes, } -impl RedisKey { - /// Create a new `RedisKey` from static bytes without copying. +impl Key { + /// Create a new `Key` from static bytes without copying. 
pub const fn from_static(b: &'static [u8]) -> Self { - RedisKey { + Key { key: Bytes::from_static(b), } } - /// Create a new `RedisKey` from a `&'static str` without copying. + /// Create a new `Key` from a `&'static str` without copying. pub const fn from_static_str(b: &'static str) -> Self { - RedisKey { + Key { key: Bytes::from_static(b.as_bytes()), } } @@ -238,146 +237,141 @@ impl RedisKey { self.key.split_to(self.key.len()) } - /// Attempt to convert the key to any type that implements [FromRedisKey](crate::types::FromRedisKey). + /// Attempt to convert the key to any type that implements [FromKey](crate::types::FromKey). /// - /// See the [RedisValue::convert](crate::types::RedisValue::convert) documentation for more information. - pub fn convert(self) -> Result + /// See the [Value::convert](crate::types::Value::convert) documentation for more information. + pub fn convert(self) -> Result where - K: FromRedisKey, + K: FromKey, { K::from_key(self) } } -impl TryFrom for RedisKey { - type Error = RedisError; +impl TryFrom for Key { + type Error = Error; - fn try_from(value: RedisValue) -> Result { + fn try_from(value: Value) -> Result { let val = match value { - RedisValue::String(s) => RedisKey { key: s.into_inner() }, - RedisValue::Integer(i) => RedisKey { + Value::String(s) => Key { key: s.into_inner() }, + Value::Integer(i) => Key { key: i.to_string().into(), }, - RedisValue::Double(f) => RedisKey { + Value::Double(f) => Key { key: f.to_string().into(), }, - RedisValue::Bytes(b) => RedisKey { key: b }, - RedisValue::Boolean(b) => match b { - true => RedisKey { + Value::Bytes(b) => Key { key: b }, + Value::Boolean(b) => match b { + true => Key { key: TRUE_STR.clone().into_inner(), }, - false => RedisKey { + false => Key { key: FALSE_STR.clone().into_inner(), }, }, - RedisValue::Queued => utils::static_str(QUEUED).into(), - _ => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, - "Cannot convert to key.", - )) - }, + Value::Queued => 
utils::static_str(QUEUED).into(), + _ => return Err(Error::new(ErrorKind::InvalidArgument, "Cannot convert to key.")), }; Ok(val) } } -impl From for RedisKey { +impl From for Key { fn from(b: Bytes) -> Self { - RedisKey { key: b } + Key { key: b } } } -impl From> for RedisKey { +impl From> for Key { fn from(b: Box<[u8]>) -> Self { - RedisKey { key: b.into() } + Key { key: b.into() } } } -impl<'a> From<&'a [u8]> for RedisKey { +impl<'a> From<&'a [u8]> for Key { fn from(b: &'a [u8]) -> Self { - RedisKey { key: b.to_vec().into() } + Key { key: b.to_vec().into() } } } -impl From for RedisKey { +impl From for Key { fn from(s: String) -> Self { - RedisKey { key: s.into() } + Key { key: s.into() } } } -impl From<&str> for RedisKey { +impl From<&str> for Key { fn from(s: &str) -> Self { - RedisKey { + Key { key: s.as_bytes().to_vec().into(), } } } -impl From<&String> for RedisKey { +impl From<&String> for Key { fn from(s: &String) -> Self { - RedisKey { key: s.clone().into() } + Key { key: s.clone().into() } } } -impl From for RedisKey { +impl From for Key { fn from(s: Str) -> Self { - RedisKey { key: s.into_inner() } + Key { key: s.into_inner() } } } -impl From<&Str> for RedisKey { +impl From<&Str> for Key { fn from(s: &Str) -> Self { - RedisKey { key: s.inner().clone() } + Key { key: s.inner().clone() } } } -impl From<&RedisKey> for RedisKey { - fn from(k: &RedisKey) -> RedisKey { +impl From<&Key> for Key { + fn from(k: &Key) -> Key { k.clone() } } -impl From for RedisKey { +impl From for Key { fn from(b: bool) -> Self { match b { - true => RedisKey::from_static_str("true"), - false => RedisKey::from_static_str("false"), + true => Key::from_static_str("true"), + false => Key::from_static_str("false"), } } } -impl_from_str_for_redis_key!(u8); -impl_from_str_for_redis_key!(u16); -impl_from_str_for_redis_key!(u32); -impl_from_str_for_redis_key!(u64); -impl_from_str_for_redis_key!(u128); -impl_from_str_for_redis_key!(usize); -impl_from_str_for_redis_key!(i8); 
-impl_from_str_for_redis_key!(i16); -impl_from_str_for_redis_key!(i32); -impl_from_str_for_redis_key!(i64); -impl_from_str_for_redis_key!(i128); -impl_from_str_for_redis_key!(isize); -impl_from_str_for_redis_key!(f32); -impl_from_str_for_redis_key!(f64); +impl_from_str_for_key!(u8); +impl_from_str_for_key!(u16); +impl_from_str_for_key!(u32); +impl_from_str_for_key!(u64); +impl_from_str_for_key!(u128); +impl_from_str_for_key!(usize); +impl_from_str_for_key!(i8); +impl_from_str_for_key!(i16); +impl_from_str_for_key!(i32); +impl_from_str_for_key!(i64); +impl_from_str_for_key!(i128); +impl_from_str_for_key!(isize); +impl_from_str_for_key!(f32); +impl_from_str_for_key!(f64); -/// A map of `(RedisKey, RedisValue)` pairs. +/// A map of `(Key, Value)` pairs. #[derive(Clone, Debug, Eq, PartialEq)] -pub struct RedisMap { - pub(crate) inner: HashMap, +pub struct Map { + pub(crate) inner: HashMap, } -impl RedisMap { +impl Map { /// Create a new empty map. pub fn new() -> Self { - RedisMap { inner: HashMap::new() } + Map { inner: HashMap::new() } } /// Replace the value an empty map, returning the original value. pub fn take(&mut self) -> Self { - RedisMap { + Map { inner: mem::take(&mut self.inner), } } @@ -388,111 +382,111 @@ impl RedisMap { } /// Take the inner `HashMap`. 
- pub fn inner(self) -> HashMap { + pub fn inner(self) -> HashMap { self.inner } } -impl Deref for RedisMap { - type Target = HashMap; +impl Deref for Map { + type Target = HashMap; fn deref(&self) -> &Self::Target { &self.inner } } -impl DerefMut for RedisMap { +impl DerefMut for Map { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> From<&'a RedisMap> for RedisMap { - fn from(vals: &'a RedisMap) -> Self { +impl<'a> From<&'a Map> for Map { + fn from(vals: &'a Map) -> Self { vals.clone() } } -impl TryFrom> for RedisMap +impl TryFrom> for Map where - K: TryInto, - K::Error: Into, - V: TryInto, - V::Error: Into, + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(value: HashMap) -> Result { - Ok(RedisMap { - inner: utils::into_redis_map(value.into_iter())?, + Ok(Map { + inner: utils::into_map(value.into_iter())?, }) } } -impl TryFrom> for RedisMap +impl TryFrom> for Map where - K: TryInto, - K::Error: Into, - V: TryInto, - V::Error: Into, + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(value: BTreeMap) -> Result { - Ok(RedisMap { - inner: utils::into_redis_map(value.into_iter())?, + Ok(Map { + inner: utils::into_map(value.into_iter())?, }) } } -impl From<()> for RedisMap { +impl From<()> for Map { fn from(_: ()) -> Self { - RedisMap::new() + Map::new() } } -impl TryFrom<(K, V)> for RedisMap +impl TryFrom<(K, V)> for Map where - K: TryInto, - K::Error: Into, - V: TryInto, - V::Error: Into, + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from((key, value): (K, V)) -> Result { let mut inner = HashMap::with_capacity(1); inner.insert(to!(key)?, to!(value)?); - Ok(RedisMap { inner }) + Ok(Map { inner }) } } -impl TryFrom> for RedisMap +impl TryFrom> for Map where - K: TryInto, - K::Error: Into, - V: TryInto, - V::Error: 
Into, + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(values: Vec<(K, V)>) -> Result { let mut inner = HashMap::with_capacity(values.len()); for (key, value) in values.into_iter() { inner.insert(to!(key)?, to!(value)?); } - Ok(RedisMap { inner }) + Ok(Map { inner }) } } -impl TryFrom<[(K, V); N]> for RedisMap +impl TryFrom<[(K, V); N]> for Map where - K: TryInto, - K::Error: Into, - V: TryInto, - V::Error: Into, + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(value: [(K, V); N]) -> Result { let mut inner = HashMap::with_capacity(value.len()); @@ -500,17 +494,17 @@ where inner.insert(to!(key)?, to!(value)?); } - Ok(RedisMap { inner }) + Ok(Map { inner }) } } -impl<'a, K, V, const N: usize> TryFrom<&'a [(K, V); N]> for RedisMap +impl<'a, K, V, const N: usize> TryFrom<&'a [(K, V); N]> for Map where - K: TryInto + Clone, - K::Error: Into, - V: TryInto + Clone, - V::Error: Into, + K: TryInto + Clone, + K::Error: Into, + V: TryInto + Clone, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(value: &'a [(K, V); N]) -> Result { let mut inner = HashMap::with_capacity(value.len()); @@ -519,32 +513,32 @@ where inner.insert(to!(key)?, to!(value)?); } - Ok(RedisMap { inner }) + Ok(Map { inner }) } } -impl TryFrom> for RedisMap +impl TryFrom> for Map where - K: TryInto, - K::Error: Into, - V: TryInto, - V::Error: Into, + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(values: VecDeque<(K, V)>) -> Result { let mut inner = HashMap::with_capacity(values.len()); for (key, value) in values.into_iter() { inner.insert(to!(key)?, to!(value)?); } - Ok(RedisMap { inner }) + Ok(Map { inner }) } } -impl FromIterator<(K, V)> for RedisMap +impl FromIterator<(K, V)> for Map where - K: Into, - V: Into, + K: Into, + V: Into, { fn 
from_iter>(iter: T) -> Self { Self { @@ -553,9 +547,9 @@ where } } -/// The kind of value from Redis. +/// The kind of value from the server. #[derive(Clone, Debug, Eq, PartialEq)] -pub enum RedisValueKind { +pub enum ValueKind { Boolean, Integer, Double, @@ -567,27 +561,27 @@ pub enum RedisValueKind { Array, } -impl fmt::Display for RedisValueKind { +impl fmt::Display for ValueKind { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let s = match *self { - RedisValueKind::Boolean => "Boolean", - RedisValueKind::Integer => "Integer", - RedisValueKind::Double => "Double", - RedisValueKind::String => "String", - RedisValueKind::Bytes => "Bytes", - RedisValueKind::Null => "nil", - RedisValueKind::Queued => "Queued", - RedisValueKind::Map => "Map", - RedisValueKind::Array => "Array", + ValueKind::Boolean => "Boolean", + ValueKind::Integer => "Integer", + ValueKind::Double => "Double", + ValueKind::String => "String", + ValueKind::Bytes => "Bytes", + ValueKind::Null => "nil", + ValueKind::Queued => "Queued", + ValueKind::Map => "Map", + ValueKind::Array => "Array", }; write!(f, "{}", s) } } -/// A value used in a Redis command. +/// A value used in arguments or response types. #[derive(Clone, Debug)] -pub enum RedisValue { +pub enum Value { /// A boolean value. Boolean(bool), /// An integer value. @@ -603,17 +597,17 @@ pub enum RedisValue { /// A special value used to indicate a MULTI block command was received by the server. Queued, /// A map of key/value pairs, primarily used in RESP3 mode. - Map(RedisMap), + Map(Map), /// An ordered list of values. /// /// In RESP2 mode the server usually sends map structures as an array of key/value pairs. 
- Array(Vec), + Array(Vec), } #[allow(clippy::match_like_matches_macro)] -impl PartialEq for RedisValue { +impl PartialEq for Value { fn eq(&self, other: &Self) -> bool { - use RedisValue::*; + use Value::*; match self { Boolean(ref s) => match other { @@ -656,20 +650,20 @@ impl PartialEq for RedisValue { } } -impl Eq for RedisValue {} +impl Eq for Value {} -impl RedisValue { - /// Create a new `RedisValue::Bytes` from a static byte slice without copying. +impl Value { + /// Create a new `Value::Bytes` from a static byte slice without copying. pub fn from_static(b: &'static [u8]) -> Self { - RedisValue::Bytes(Bytes::from_static(b)) + Value::Bytes(Bytes::from_static(b)) } - /// Create a new `RedisValue::String` from a static `str` without copying. + /// Create a new `Value::String` from a static `str` without copying. pub fn from_static_str(s: &'static str) -> Self { - RedisValue::String(utils::static_str(s)) + Value::String(utils::static_str(s)) } - /// Create a new `RedisValue` with the `OK` status. + /// Create a new `Value` with the `OK` status. pub fn new_ok() -> Self { Self::from_static_str(OK) } @@ -677,66 +671,66 @@ impl RedisValue { /// Whether the value is a simple string OK value. pub fn is_ok(&self) -> bool { match *self { - RedisValue::String(ref s) => *s == OK, + Value::String(ref s) => *s == OK, _ => false, } } /// Attempt to convert the value into an integer, returning the original string as an error if the parsing fails. - pub fn into_integer(self) -> Result { + pub fn into_integer(self) -> Result { match self { - RedisValue::String(s) => match s.parse::() { - Ok(i) => Ok(RedisValue::Integer(i)), - Err(_) => Err(RedisValue::String(s)), + Value::String(s) => match s.parse::() { + Ok(i) => Ok(Value::Integer(i)), + Err(_) => Err(Value::String(s)), }, - RedisValue::Integer(i) => Ok(RedisValue::Integer(i)), + Value::Integer(i) => Ok(Value::Integer(i)), _ => Err(self), } } /// Read the type of the value without any associated data. 
- pub fn kind(&self) -> RedisValueKind { + pub fn kind(&self) -> ValueKind { match *self { - RedisValue::Boolean(_) => RedisValueKind::Boolean, - RedisValue::Integer(_) => RedisValueKind::Integer, - RedisValue::Double(_) => RedisValueKind::Double, - RedisValue::String(_) => RedisValueKind::String, - RedisValue::Bytes(_) => RedisValueKind::Bytes, - RedisValue::Null => RedisValueKind::Null, - RedisValue::Queued => RedisValueKind::Queued, - RedisValue::Map(_) => RedisValueKind::Map, - RedisValue::Array(_) => RedisValueKind::Array, + Value::Boolean(_) => ValueKind::Boolean, + Value::Integer(_) => ValueKind::Integer, + Value::Double(_) => ValueKind::Double, + Value::String(_) => ValueKind::String, + Value::Bytes(_) => ValueKind::Bytes, + Value::Null => ValueKind::Null, + Value::Queued => ValueKind::Queued, + Value::Map(_) => ValueKind::Map, + Value::Array(_) => ValueKind::Array, } } /// Check if the value is null. pub fn is_null(&self) -> bool { - matches!(*self, RedisValue::Null) + matches!(*self, Value::Null) } /// Check if the value is an integer. pub fn is_integer(&self) -> bool { - matches!(self, RedisValue::Integer(_)) + matches!(self, Value::Integer(_)) } /// Check if the value is a string. pub fn is_string(&self) -> bool { - matches!(*self, RedisValue::String(_)) + matches!(*self, Value::String(_)) } /// Check if the value is an array of bytes. pub fn is_bytes(&self) -> bool { - matches!(*self, RedisValue::Bytes(_)) + matches!(*self, Value::Bytes(_)) } /// Whether the value is a boolean value or can be parsed as a boolean value. 
#[allow(clippy::match_like_matches_macro)] pub fn is_boolean(&self) -> bool { match *self { - RedisValue::Boolean(_) => true, - RedisValue::Integer(0 | 1) => true, - RedisValue::Integer(_) => false, - RedisValue::String(ref s) => match s.as_bytes() { + Value::Boolean(_) => true, + Value::Integer(0 | 1) => true, + Value::Integer(_) => false, + Value::String(ref s) => match s.as_bytes() { b"true" | b"false" | b"t" | b"f" | b"TRUE" | b"FALSE" | b"T" | b"F" | b"1" | b"0" => true, _ => false, }, @@ -747,60 +741,60 @@ impl RedisValue { /// Whether the inner value is a double or can be parsed as a double. pub fn is_double(&self) -> bool { match *self { - RedisValue::Double(_) => true, - RedisValue::String(ref s) => utils::redis_string_to_f64(s).is_ok(), + Value::Double(_) => true, + Value::String(ref s) => utils::string_to_f64(s).is_ok(), _ => false, } } /// Check if the value is a `QUEUED` response. pub fn is_queued(&self) -> bool { - matches!(*self, RedisValue::Queued) + matches!(*self, Value::Queued) } /// Whether the value is an array or map. pub fn is_aggregate_type(&self) -> bool { - matches!(*self, RedisValue::Array(_) | RedisValue::Map(_)) + matches!(*self, Value::Array(_) | Value::Map(_)) } - /// Whether the value is a `RedisMap`. + /// Whether the value is a `Map`. /// /// See [is_maybe_map](Self::is_maybe_map) for a function that also checks for arrays that likely represent a map in /// RESP2 mode. pub fn is_map(&self) -> bool { - matches!(*self, RedisValue::Map(_)) + matches!(*self, Value::Map(_)) } - /// Whether the value is a `RedisMap` or an array with an even number of elements where each even-numbered + /// Whether the value is a `Map` or an array with an even number of elements where each even-numbered /// element is not an aggregate type. /// /// RESP2 and RESP3 encode maps differently, and this function can be used to duck-type maps across protocol /// versions. 
pub fn is_maybe_map(&self) -> bool { match *self { - RedisValue::Map(_) => true, - RedisValue::Array(ref arr) => utils::is_maybe_array_map(arr), + Value::Map(_) => true, + Value::Array(ref arr) => utils::is_maybe_array_map(arr), _ => false, } } /// Whether the value is an array. pub fn is_array(&self) -> bool { - matches!(*self, RedisValue::Array(_)) + matches!(*self, Value::Array(_)) } /// Read and return the inner value as a `u64`, if possible. pub fn as_u64(&self) -> Option { match self { - RedisValue::Integer(ref i) => { + Value::Integer(ref i) => { if *i >= 0 { Some(*i as u64) } else { None } }, - RedisValue::String(ref s) => s.parse::().ok(), - RedisValue::Array(ref inner) => { + Value::String(ref s) => s.parse::().ok(), + Value::Array(ref inner) => { if inner.len() == 1 { inner.first().and_then(|v| v.as_u64()) } else { @@ -808,9 +802,9 @@ impl RedisValue { } }, #[cfg(feature = "default-nil-types")] - RedisValue::Null => Some(0), + Value::Null => Some(0), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => None, + Value::Null => None, _ => None, } } @@ -818,9 +812,9 @@ impl RedisValue { /// Read and return the inner value as a `i64`, if possible. pub fn as_i64(&self) -> Option { match self { - RedisValue::Integer(ref i) => Some(*i), - RedisValue::String(ref s) => s.parse::().ok(), - RedisValue::Array(ref inner) => { + Value::Integer(ref i) => Some(*i), + Value::String(ref s) => s.parse::().ok(), + Value::Array(ref inner) => { if inner.len() == 1 { inner.first().and_then(|v| v.as_i64()) } else { @@ -828,9 +822,9 @@ impl RedisValue { } }, #[cfg(feature = "default-nil-types")] - RedisValue::Null => Some(0), + Value::Null => Some(0), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => None, + Value::Null => None, _ => None, } } @@ -838,15 +832,15 @@ impl RedisValue { /// Read and return the inner value as a `usize`, if possible. 
pub fn as_usize(&self) -> Option { match self { - RedisValue::Integer(i) => { + Value::Integer(i) => { if *i >= 0 { Some(*i as usize) } else { None } }, - RedisValue::String(ref s) => s.parse::().ok(), - RedisValue::Array(ref inner) => { + Value::String(ref s) => s.parse::().ok(), + Value::Array(ref inner) => { if inner.len() == 1 { inner.first().and_then(|v| v.as_usize()) } else { @@ -854,9 +848,9 @@ impl RedisValue { } }, #[cfg(feature = "default-nil-types")] - RedisValue::Null => Some(0), + Value::Null => Some(0), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => None, + Value::Null => None, _ => None, } } @@ -864,10 +858,10 @@ impl RedisValue { /// Read and return the inner value as a `f64`, if possible. pub fn as_f64(&self) -> Option { match self { - RedisValue::Double(ref f) => Some(*f), - RedisValue::String(ref s) => utils::redis_string_to_f64(s).ok(), - RedisValue::Integer(ref i) => Some(*i as f64), - RedisValue::Array(ref inner) => { + Value::Double(ref f) => Some(*f), + Value::String(ref s) => utils::string_to_f64(s).ok(), + Value::Integer(ref i) => Some(*i as f64), + Value::Array(ref inner) => { if inner.len() == 1 { inner.first().and_then(|v| v.as_f64()) } else { @@ -875,9 +869,9 @@ impl RedisValue { } }, #[cfg(feature = "default-nil-types")] - RedisValue::Null => Some(0.0), + Value::Null => Some(0.0), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => None, + Value::Null => None, _ => None, } } @@ -885,13 +879,13 @@ impl RedisValue { /// Read and return the inner `String` if the value is a string or scalar value. 
pub fn into_string(self) -> Option { match self { - RedisValue::Boolean(b) => Some(b.to_string()), - RedisValue::Double(f) => Some(f.to_string()), - RedisValue::String(s) => Some(s.to_string()), - RedisValue::Bytes(b) => String::from_utf8(b.to_vec()).ok(), - RedisValue::Integer(i) => Some(i.to_string()), - RedisValue::Queued => Some(QUEUED.to_owned()), - RedisValue::Array(mut inner) => { + Value::Boolean(b) => Some(b.to_string()), + Value::Double(f) => Some(f.to_string()), + Value::String(s) => Some(s.to_string()), + Value::Bytes(b) => String::from_utf8(b.to_vec()).ok(), + Value::Integer(i) => Some(i.to_string()), + Value::Queued => Some(QUEUED.to_owned()), + Value::Array(mut inner) => { if inner.len() == 1 { inner.pop().and_then(|v| v.into_string()) } else { @@ -899,9 +893,9 @@ impl RedisValue { } }, #[cfg(feature = "default-nil-types")] - RedisValue::Null => Some(String::new()), + Value::Null => Some(String::new()), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => None, + Value::Null => None, _ => None, } } @@ -909,16 +903,16 @@ impl RedisValue { /// Read and return the inner data as a `Str` from the `bytes` crate. 
pub fn into_bytes_str(self) -> Option { match self { - RedisValue::Boolean(b) => match b { + Value::Boolean(b) => match b { true => Some(TRUE_STR.clone()), false => Some(FALSE_STR.clone()), }, - RedisValue::Double(f) => Some(f.to_string().into()), - RedisValue::String(s) => Some(s), - RedisValue::Bytes(b) => Str::from_inner(b).ok(), - RedisValue::Integer(i) => Some(i.to_string().into()), - RedisValue::Queued => Some(utils::static_str(QUEUED)), - RedisValue::Array(mut inner) => { + Value::Double(f) => Some(f.to_string().into()), + Value::String(s) => Some(s), + Value::Bytes(b) => Str::from_inner(b).ok(), + Value::Integer(i) => Some(i.to_string().into()), + Value::Queued => Some(utils::static_str(QUEUED)), + Value::Array(mut inner) => { if inner.len() == 1 { inner.pop().and_then(|v| v.into_bytes_str()) } else { @@ -926,9 +920,9 @@ impl RedisValue { } }, #[cfg(feature = "default-nil-types")] - RedisValue::Null => Some(Str::new()), + Value::Null => Some(Str::new()), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => None, + Value::Null => None, _ => None, } } @@ -936,16 +930,16 @@ impl RedisValue { /// Read the inner value as a `Str`. 
pub fn as_bytes_str(&self) -> Option { match self { - RedisValue::Boolean(ref b) => match *b { + Value::Boolean(ref b) => match *b { true => Some(TRUE_STR.clone()), false => Some(FALSE_STR.clone()), }, - RedisValue::Double(ref f) => Some(f.to_string().into()), - RedisValue::String(ref s) => Some(s.clone()), - RedisValue::Bytes(ref b) => Str::from_inner(b.clone()).ok(), - RedisValue::Integer(ref i) => Some(i.to_string().into()), - RedisValue::Queued => Some(utils::static_str(QUEUED)), - RedisValue::Array(ref inner) => { + Value::Double(ref f) => Some(f.to_string().into()), + Value::String(ref s) => Some(s.clone()), + Value::Bytes(ref b) => Str::from_inner(b.clone()).ok(), + Value::Integer(ref i) => Some(i.to_string().into()), + Value::Queued => Some(utils::static_str(QUEUED)), + Value::Array(ref inner) => { if inner.len() == 1 { inner[0].as_bytes_str() } else { @@ -953,9 +947,9 @@ impl RedisValue { } }, #[cfg(feature = "default-nil-types")] - RedisValue::Null => Some(Str::new()), + Value::Null => Some(Str::new()), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => None, + Value::Null => None, _ => None, } } @@ -965,16 +959,16 @@ impl RedisValue { /// Note: this will cast integers and doubles to strings. 
pub fn as_string(&self) -> Option { match self { - RedisValue::Boolean(ref b) => Some(b.to_string()), - RedisValue::Double(ref f) => Some(f.to_string()), - RedisValue::String(ref s) => Some(s.to_string()), - RedisValue::Bytes(ref b) => str::from_utf8(b).ok().map(|s| s.to_owned()), - RedisValue::Integer(ref i) => Some(i.to_string()), - RedisValue::Queued => Some(QUEUED.to_owned()), + Value::Boolean(ref b) => Some(b.to_string()), + Value::Double(ref f) => Some(f.to_string()), + Value::String(ref s) => Some(s.to_string()), + Value::Bytes(ref b) => str::from_utf8(b).ok().map(|s| s.to_owned()), + Value::Integer(ref i) => Some(i.to_string()), + Value::Queued => Some(QUEUED.to_owned()), #[cfg(feature = "default-nil-types")] - RedisValue::Null => Some(String::new()), + Value::Null => Some(String::new()), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => None, + Value::Null => None, _ => None, } } @@ -984,16 +978,16 @@ impl RedisValue { /// Null is returned as `"nil"` and scalar values are cast to a string. 
pub fn as_str(&self) -> Option> { let s: Cow = match *self { - RedisValue::Double(ref f) => Cow::Owned(f.to_string()), - RedisValue::Boolean(ref b) => Cow::Owned(b.to_string()), - RedisValue::String(ref s) => Cow::Borrowed(s.deref()), - RedisValue::Integer(ref i) => Cow::Owned(i.to_string()), - RedisValue::Queued => Cow::Borrowed(QUEUED), - RedisValue::Bytes(ref b) => return str::from_utf8(b).ok().map(Cow::Borrowed), + Value::Double(ref f) => Cow::Owned(f.to_string()), + Value::Boolean(ref b) => Cow::Owned(b.to_string()), + Value::String(ref s) => Cow::Borrowed(s.deref()), + Value::Integer(ref i) => Cow::Owned(i.to_string()), + Value::Queued => Cow::Borrowed(QUEUED), + Value::Bytes(ref b) => return str::from_utf8(b).ok().map(Cow::Borrowed), #[cfg(feature = "default-nil-types")] - RedisValue::Null => Cow::Borrowed(""), + Value::Null => Cow::Borrowed(""), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => return None, + Value::Null => return None, _ => return None, }; @@ -1003,16 +997,16 @@ impl RedisValue { /// Read the inner value as a string, using `String::from_utf8_lossy` on byte slices. 
pub fn as_str_lossy(&self) -> Option> { let s: Cow = match *self { - RedisValue::Boolean(ref b) => Cow::Owned(b.to_string()), - RedisValue::Double(ref f) => Cow::Owned(f.to_string()), - RedisValue::String(ref s) => Cow::Borrowed(s.deref()), - RedisValue::Integer(ref i) => Cow::Owned(i.to_string()), - RedisValue::Queued => Cow::Borrowed(QUEUED), - RedisValue::Bytes(ref b) => String::from_utf8_lossy(b), + Value::Boolean(ref b) => Cow::Owned(b.to_string()), + Value::Double(ref f) => Cow::Owned(f.to_string()), + Value::String(ref s) => Cow::Borrowed(s.deref()), + Value::Integer(ref i) => Cow::Owned(i.to_string()), + Value::Queued => Cow::Borrowed(QUEUED), + Value::Bytes(ref b) => String::from_utf8_lossy(b), #[cfg(feature = "default-nil-types")] - RedisValue::Null => Cow::Borrowed(""), + Value::Null => Cow::Borrowed(""), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => return None, + Value::Null => return None, _ => return None, }; @@ -1022,9 +1016,9 @@ impl RedisValue { /// Read the inner value as an array of bytes, if possible. pub fn as_bytes(&self) -> Option<&[u8]> { match *self { - RedisValue::String(ref s) => Some(s.as_bytes()), - RedisValue::Bytes(ref b) => Some(b), - RedisValue::Queued => Some(QUEUED.as_bytes()), + Value::String(ref s) => Some(s.as_bytes()), + Value::Bytes(ref b) => Some(b), + Value::Queued => Some(QUEUED.as_bytes()), _ => None, } } @@ -1032,18 +1026,18 @@ impl RedisValue { /// Attempt to convert the value to a `bool`. 
pub fn as_bool(&self) -> Option { match *self { - RedisValue::Boolean(b) => Some(b), - RedisValue::Integer(ref i) => match *i { + Value::Boolean(b) => Some(b), + Value::Integer(ref i) => match *i { 0 => Some(false), 1 => Some(true), _ => None, }, - RedisValue::String(ref s) => match s.as_bytes() { + Value::String(ref s) => match s.as_bytes() { b"true" | b"TRUE" | b"t" | b"T" | b"1" => Some(true), b"false" | b"FALSE" | b"f" | b"F" | b"0" => Some(false), _ => None, }, - RedisValue::Array(ref inner) => { + Value::Array(ref inner) => { if inner.len() == 1 { inner.first().and_then(|v| v.as_bool()) } else { @@ -1051,75 +1045,72 @@ impl RedisValue { } }, #[cfg(feature = "default-nil-types")] - RedisValue::Null => Some(false), + Value::Null => Some(false), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => None, + Value::Null => None, _ => None, } } - /// Attempt to convert this value to a Redis map if it's an array with an even number of elements. - pub fn into_map(self) -> Result { + /// Attempt to convert this value to a map if it's an array with an even number of elements. 
+ pub fn into_map(self) -> Result { match self { - RedisValue::Map(map) => Ok(map), - RedisValue::Array(mut values) => { + Value::Map(map) => Ok(map), + Value::Array(mut values) => { if values.len() % 2 != 0 { - return Err(RedisError::new( - RedisErrorKind::Unknown, - "Expected an even number of elements.", - )); + return Err(Error::new(ErrorKind::Unknown, "Expected an even number of elements.")); } let mut inner = HashMap::with_capacity(values.len() / 2); while values.len() >= 2 { let value = values.pop().unwrap(); - let key: RedisKey = values.pop().unwrap().try_into()?; + let key: Key = values.pop().unwrap().try_into()?; inner.insert(key, value); } - Ok(RedisMap { inner }) + Ok(Map { inner }) }, #[cfg(feature = "default-nil-types")] - RedisValue::Null => Ok(RedisMap::new()), - _ => Err(RedisError::new(RedisErrorKind::Unknown, "Could not convert to map.")), + Value::Null => Ok(Map::new()), + _ => Err(Error::new(ErrorKind::Unknown, "Could not convert to map.")), } } - pub(crate) fn into_multiple_values(self) -> Vec { + pub(crate) fn into_multiple_values(self) -> Vec { match self { - RedisValue::Array(values) => values, - RedisValue::Map(map) => map + Value::Array(values) => values, + Value::Map(map) => map .inner() .into_iter() - .flat_map(|(k, v)| [RedisValue::Bytes(k.into_bytes()), v]) + .flat_map(|(k, v)| [Value::Bytes(k.into_bytes()), v]) .collect(), - RedisValue::Null => Vec::new(), + Value::Null => Vec::new(), _ => vec![self], } } /// Convert the array value to a set, if possible. 
- pub fn into_set(self) -> Result, RedisError> { + pub fn into_set(self) -> Result, Error> { match self { - RedisValue::Array(values) => Ok(values.into_iter().collect()), + Value::Array(values) => Ok(values.into_iter().collect()), #[cfg(feature = "default-nil-types")] - RedisValue::Null => Ok(HashSet::new()), - _ => Err(RedisError::new_parse("Could not convert to set.")), + Value::Null => Ok(HashSet::new()), + _ => Err(Error::new_parse("Could not convert to set.")), } } - /// Convert a `RedisValue` to `Vec<(RedisValue, f64)>`, if possible. - pub fn into_zset_result(self) -> Result, RedisError> { + /// Convert a `Value` to `Vec<(Value, f64)>`, if possible. + pub fn into_zset_result(self) -> Result, Error> { protocol_utils::value_to_zset_result(self) } /// Convert this value to an array if it's an array or map. /// /// If the value is not an array or map this returns a single-element array containing the original value. - pub fn into_array(self) -> Vec { + pub fn into_array(self) -> Vec { match self { - RedisValue::Array(values) => values, - RedisValue::Map(map) => { + Value::Array(values) => values, + Value::Map(map) => { let mut out = Vec::with_capacity(map.len() * 2); for (key, value) in map.inner().into_iter() { out.extend([key.into(), value]); @@ -1133,21 +1124,21 @@ impl RedisValue { /// Convert the value to an array of bytes, if possible. 
pub fn into_owned_bytes(self) -> Option> { let v = match self { - RedisValue::String(s) => s.to_string().into_bytes(), - RedisValue::Bytes(b) => b.to_vec(), - RedisValue::Queued => QUEUED.as_bytes().to_vec(), - RedisValue::Array(mut inner) => { + Value::String(s) => s.to_string().into_bytes(), + Value::Bytes(b) => b.to_vec(), + Value::Queued => QUEUED.as_bytes().to_vec(), + Value::Array(mut inner) => { if inner.len() == 1 { return inner.pop().and_then(|v| v.into_owned_bytes()); } else { return None; } }, - RedisValue::Integer(i) => i.to_string().into_bytes(), + Value::Integer(i) => i.to_string().into_bytes(), #[cfg(feature = "default-nil-types")] - RedisValue::Null => Vec::new(), + Value::Null => Vec::new(), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => return None, + Value::Null => return None, _ => return None, }; @@ -1157,21 +1148,21 @@ impl RedisValue { /// Convert the value into a `Bytes` view. pub fn into_bytes(self) -> Option { let v = match self { - RedisValue::String(s) => s.inner().clone(), - RedisValue::Bytes(b) => b, - RedisValue::Queued => Bytes::from_static(QUEUED.as_bytes()), - RedisValue::Array(mut inner) => { + Value::String(s) => s.inner().clone(), + Value::Bytes(b) => b, + Value::Queued => Bytes::from_static(QUEUED.as_bytes()), + Value::Array(mut inner) => { if inner.len() == 1 { return inner.pop().and_then(|v| v.into_bytes()); } else { return None; } }, - RedisValue::Integer(i) => i.to_string().into(), + Value::Integer(i) => i.to_string().into(), #[cfg(feature = "default-nil-types")] - RedisValue::Null => Bytes::new(), + Value::Null => Bytes::new(), #[cfg(not(feature = "default-nil-types"))] - RedisValue::Null => return None, + Value::Null => return None, _ => return None, }; @@ -1181,14 +1172,14 @@ impl RedisValue { /// Return the length of the inner array if the value is an array. 
pub fn array_len(&self) -> Option { match self { - RedisValue::Array(ref a) => Some(a.len()), + Value::Array(ref a) => Some(a.len()), _ => None, } } /// Whether the value is an array with one element. pub(crate) fn is_single_element_vec(&self) -> bool { - if let RedisValue::Array(ref d) = self { + if let Value::Array(ref d) = self { d.len() == 1 } else { false @@ -1199,7 +1190,7 @@ impl RedisValue { /// /// This uses unwrap. Use [is_single_element_vec] first. pub(crate) fn pop_or_take(self) -> Self { - if let RedisValue::Array(mut values) = self { + if let Value::Array(mut values) = self { values.pop().unwrap() } else { self @@ -1223,12 +1214,12 @@ impl RedisValue { /// information. #[cfg(feature = "i-streams")] #[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] - pub fn into_xread_response(self) -> Result, RedisError> + pub fn into_xread_response(self) -> Result, Error> where - K1: FromRedisKey + Hash + Eq, - K2: FromRedisKey + Hash + Eq, - I: FromRedis, - V: FromRedis, + K1: FromKey + Hash + Eq, + K2: FromKey + Hash + Eq, + I: FromValue, + V: FromValue, { self.flatten_array_values(2).convert() } @@ -1238,11 +1229,11 @@ impl RedisValue { /// This function supports responses in both RESP2 and RESP3 formats. #[cfg(feature = "i-streams")] #[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] - pub fn into_xread_value(self) -> Result>, RedisError> + pub fn into_xread_value(self) -> Result>, Error> where - K: FromRedisKey + Hash + Eq, - I: FromRedis, - V: FromRedis, + K: FromKey + Hash + Eq, + I: FromValue, + V: FromValue, { self.flatten_array_values(1).convert() } @@ -1255,13 +1246,13 @@ impl RedisValue { /// Callers should use `xautoclaim` instead if this data is needed. 
#[cfg(feature = "i-streams")] #[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] - pub fn into_xautoclaim_values(self) -> Result<(String, Vec>), RedisError> + pub fn into_xautoclaim_values(self) -> Result<(String, Vec>), Error> where - K: FromRedisKey + Hash + Eq, - I: FromRedis, - V: FromRedis, + K: FromKey + Hash + Eq, + I: FromValue, + V: FromValue, { - if let RedisValue::Array(mut values) = self { + if let Value::Array(mut values) = self { if values.len() == 3 { // convert the redis 7.x response format to the v6 format trace!("Removing the third message PID elements from XAUTOCLAIM response."); @@ -1274,14 +1265,14 @@ impl RedisValue { Ok((cursor, entries.flatten_array_values(1).convert()?)) } else { - Err(RedisError::new_parse("Expected array response.")) + Err(Error::new_parse("Expected array response.")) } } /// Parse the value as the response from `FUNCTION LIST`, including only functions with the provided library `name`. #[cfg(feature = "i-scripts")] #[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] - pub fn as_functions(&self, name: &str) -> Result, RedisError> { + pub fn as_functions(&self, name: &str) -> Result, Error> { utils::value_to_functions(self, name) } @@ -1290,7 +1281,7 @@ impl RedisValue { /// Null values are returned as `None` to work more easily with the result of the `GEOPOS` command. #[cfg(feature = "i-geo")] #[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] - pub fn as_geo_position(&self) -> Result, RedisError> { + pub fn as_geo_position(&self) -> Result, Error> { if self.is_null() { Ok(None) } else { @@ -1299,7 +1290,7 @@ impl RedisValue { } /// Parse the value as the response to any of the relevant GEO commands that return an array of - /// [GeoRadiusInfo](crate::types::GeoRadiusInfo) values, such as `GEOSEARCH`, GEORADIUS`, etc. + /// [GeoRadiusInfo](crate::types::geo::GeoRadiusInfo) values, such as `GEOSEARCH`, GEORADIUS`, etc. 
#[cfg(feature = "i-geo")] #[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] pub fn into_geo_radius_result( @@ -1307,81 +1298,81 @@ impl RedisValue { withcoord: bool, withdist: bool, withhash: bool, - ) -> Result, RedisError> { + ) -> Result, Error> { match self { - RedisValue::Array(data) => data + Value::Array(data) => data .into_iter() - .map(|value| GeoRadiusInfo::from_redis_value(value, withcoord, withdist, withhash)) + .map(|value| GeoRadiusInfo::from_value(value, withcoord, withdist, withhash)) .collect(), - RedisValue::Null => Ok(Vec::new()), - _ => Err(RedisError::new(RedisErrorKind::Parse, "Expected array.")), + Value::Null => Ok(Vec::new()), + _ => Err(Error::new(ErrorKind::Parse, "Expected array.")), } } - /// Replace this value with `RedisValue::Null`, returning the original value. - pub fn take(&mut self) -> RedisValue { - mem::replace(self, RedisValue::Null) + /// Replace this value with `Value::Null`, returning the original value. + pub fn take(&mut self) -> Value { + mem::replace(self, Value::Null) } - /// Attempt to convert this value to any value that implements the [FromRedis](crate::types::FromRedis) trait. - pub fn convert(self) -> Result + /// Attempt to convert this value to any value that implements the [FromValue](crate::types::FromValue) trait. + pub fn convert(self) -> Result where - R: FromRedis, + R: FromValue, { R::from_value(self) } /// Whether the value can be hashed. /// - /// Some use cases require using `RedisValue` types as keys in a `HashMap`, etc. Trying to do so with an aggregate + /// Some use cases require using `Value` types as keys in a `HashMap`, etc. Trying to do so with an aggregate /// type can panic, and this function can be used to more gracefully handle this situation. 
pub fn can_hash(&self) -> bool { matches!( self.kind(), - RedisValueKind::String - | RedisValueKind::Boolean - | RedisValueKind::Double - | RedisValueKind::Integer - | RedisValueKind::Bytes - | RedisValueKind::Null - | RedisValueKind::Array - | RedisValueKind::Queued + ValueKind::String + | ValueKind::Boolean + | ValueKind::Double + | ValueKind::Integer + | ValueKind::Bytes + | ValueKind::Null + | ValueKind::Array + | ValueKind::Queued ) } /// Convert the value to JSON. #[cfg(feature = "serde-json")] #[cfg_attr(docsrs, doc(cfg(feature = "serde-json")))] - pub fn into_json(self) -> Result { - Value::from_value(self) + pub fn into_json(self) -> Result { + serde_json::Value::from_value(self) } } -impl Hash for RedisValue { +impl Hash for Value { fn hash(&self, state: &mut H) { // used to prevent collisions between different types let prefix = match self.kind() { - RedisValueKind::Boolean => b'B', - RedisValueKind::Double => b'd', - RedisValueKind::Integer => b'i', - RedisValueKind::String => b's', - RedisValueKind::Null => b'n', - RedisValueKind::Queued => b'q', - RedisValueKind::Array => b'a', - RedisValueKind::Map => b'm', - RedisValueKind::Bytes => b'b', + ValueKind::Boolean => b'B', + ValueKind::Double => b'd', + ValueKind::Integer => b'i', + ValueKind::String => b's', + ValueKind::Null => b'n', + ValueKind::Queued => b'q', + ValueKind::Array => b'a', + ValueKind::Map => b'm', + ValueKind::Bytes => b'b', }; prefix.hash(state); match *self { - RedisValue::Boolean(b) => b.hash(state), - RedisValue::Double(f) => f.to_be_bytes().hash(state), - RedisValue::Integer(d) => d.hash(state), - RedisValue::String(ref s) => s.hash(state), - RedisValue::Bytes(ref b) => b.hash(state), - RedisValue::Null => NULL.hash(state), - RedisValue::Queued => QUEUED.hash(state), - RedisValue::Array(ref arr) => { + Value::Boolean(b) => b.hash(state), + Value::Double(f) => f.to_be_bytes().hash(state), + Value::Integer(d) => d.hash(state), + Value::String(ref s) => s.hash(state), + 
Value::Bytes(ref b) => b.hash(state), + Value::Null => NULL.hash(state), + Value::Queued => QUEUED.hash(state), + Value::Array(ref arr) => { for value in arr.iter() { value.hash(state); } @@ -1391,312 +1382,302 @@ impl Hash for RedisValue { } } -#[cfg(not(feature = "specialize-into-bytes"))] -#[cfg_attr(docsrs, doc(cfg(not(feature = "specialize-into-bytes"))))] -impl From for RedisValue { - fn from(d: u8) -> Self { - RedisValue::Integer(d as i64) - } -} - -impl From for RedisValue { +impl From for Value { fn from(d: u16) -> Self { - RedisValue::Integer(d as i64) + Value::Integer(d as i64) } } -impl From for RedisValue { +impl From for Value { fn from(d: u32) -> Self { - RedisValue::Integer(d as i64) + Value::Integer(d as i64) } } -impl From for RedisValue { +impl From for Value { fn from(d: i8) -> Self { - RedisValue::Integer(d as i64) + Value::Integer(d as i64) } } -impl From for RedisValue { +impl From for Value { fn from(d: i16) -> Self { - RedisValue::Integer(d as i64) + Value::Integer(d as i64) } } -impl From for RedisValue { +impl From for Value { fn from(d: i32) -> Self { - RedisValue::Integer(d as i64) + Value::Integer(d as i64) } } -impl From for RedisValue { +impl From for Value { fn from(d: i64) -> Self { - RedisValue::Integer(d) + Value::Integer(d) } } -impl From for RedisValue { +impl From for Value { fn from(f: f32) -> Self { - RedisValue::Double(f as f64) + Value::Double(f as f64) } } -impl From for RedisValue { +impl From for Value { fn from(f: f64) -> Self { - RedisValue::Double(f) + Value::Double(f) } } -impl TryFrom for RedisValue { - type Error = RedisError; +impl TryFrom for Value { + type Error = Error; fn try_from(d: u64) -> Result { if d >= (i64::MAX as u64) { - return Err(RedisError::new(RedisErrorKind::Unknown, "Unsigned integer too large.")); + return Err(Error::new(ErrorKind::Unknown, "Unsigned integer too large.")); } Ok((d as i64).into()) } } -impl TryFrom for RedisValue { - type Error = RedisError; +impl TryFrom for Value { + type 
Error = Error; fn try_from(d: u128) -> Result { if d >= (i64::MAX as u128) { - return Err(RedisError::new(RedisErrorKind::Unknown, "Unsigned integer too large.")); + return Err(Error::new(ErrorKind::Unknown, "Unsigned integer too large.")); } Ok((d as i64).into()) } } -impl TryFrom for RedisValue { - type Error = RedisError; +impl TryFrom for Value { + type Error = Error; fn try_from(d: i128) -> Result { if d >= (i64::MAX as i128) { - return Err(RedisError::new(RedisErrorKind::Unknown, "Signed integer too large.")); + return Err(Error::new(ErrorKind::Unknown, "Signed integer too large.")); } Ok((d as i64).into()) } } -impl TryFrom for RedisValue { - type Error = RedisError; +impl TryFrom for Value { + type Error = Error; fn try_from(d: usize) -> Result { if d >= (i64::MAX as usize) { - return Err(RedisError::new(RedisErrorKind::Unknown, "Unsigned integer too large.")); + return Err(Error::new(ErrorKind::Unknown, "Unsigned integer too large.")); } Ok((d as i64).into()) } } -impl From for RedisValue { +impl From for Value { fn from(s: Str) -> Self { - RedisValue::String(s) + Value::String(s) } } -impl From for RedisValue { +impl From for Value { fn from(b: Bytes) -> Self { - RedisValue::Bytes(b) + Value::Bytes(b) } } -impl From> for RedisValue { +impl From> for Value { fn from(b: Box<[u8]>) -> Self { - RedisValue::Bytes(b.into()) + Value::Bytes(b.into()) } } -impl From for RedisValue { +impl From for Value { fn from(d: String) -> Self { - RedisValue::String(Str::from(d)) + Value::String(Str::from(d)) } } -impl<'a> From<&'a String> for RedisValue { +impl<'a> From<&'a String> for Value { fn from(d: &'a String) -> Self { - RedisValue::String(Str::from(d)) + Value::String(Str::from(d)) } } -impl<'a> From<&'a str> for RedisValue { +impl<'a> From<&'a str> for Value { fn from(d: &'a str) -> Self { - RedisValue::String(Str::from(d)) + Value::String(Str::from(d)) } } -impl<'a> From<&'a [u8]> for RedisValue { +impl<'a> From<&'a [u8]> for Value { fn from(b: &'a [u8]) -> Self { 
- RedisValue::Bytes(Bytes::from(b.to_vec())) + Value::Bytes(Bytes::from(b.to_vec())) } } -impl From for RedisValue { +impl From for Value { fn from(d: bool) -> Self { - RedisValue::Boolean(d) + Value::Boolean(d) } } -impl TryFrom> for RedisValue +impl TryFrom> for Value where - T: TryInto, - T::Error: Into, + T: TryInto, + T::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(d: Option) -> Result { match d { Some(i) => to!(i), - None => Ok(RedisValue::Null), + None => Ok(Value::Null), } } } -impl<'a, T, const N: usize> TryFrom<&'a [T; N]> for RedisValue +impl<'a, T, const N: usize> TryFrom<&'a [T; N]> for Value where - T: TryInto + Clone, - T::Error: Into, + T: TryInto + Clone, + T::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(value: &'a [T; N]) -> Result { let values = value .iter() .map(|v| v.clone().try_into().map_err(|e| e.into())) - .collect::, RedisError>>()?; + .collect::, Error>>()?; - Ok(RedisValue::Array(values)) + Ok(Value::Array(values)) } } -impl TryFrom<[T; N]> for RedisValue +impl TryFrom<[T; N]> for Value where - T: TryInto + Clone, - T::Error: Into, + T: TryInto + Clone, + T::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(value: [T; N]) -> Result { let values = value .into_iter() .map(|v| v.try_into().map_err(|e| e.into())) - .collect::, RedisError>>()?; + .collect::, Error>>()?; - Ok(RedisValue::Array(values)) + Ok(Value::Array(values)) } } -#[cfg(feature = "specialize-into-bytes")] -#[cfg_attr(docsrs, doc(cfg(feature = "specialize-into-bytes")))] -impl TryFrom> for RedisValue { - type Error = RedisError; +impl TryFrom> for Value { + type Error = Error; fn try_from(value: Vec) -> Result { - Ok(RedisValue::Bytes(value.into())) + Ok(Value::Bytes(value.into())) } } -impl TryFrom> for RedisValue +impl TryFrom> for Value where - T: TryInto, - T::Error: Into, + T: TryInto, + T::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(value: Vec) -> 
Result { let values = value .into_iter() .map(|v| v.try_into().map_err(|e| e.into())) - .collect::, RedisError>>()?; + .collect::, Error>>()?; - Ok(RedisValue::Array(values)) + Ok(Value::Array(values)) } } -impl TryFrom> for RedisValue +impl TryFrom> for Value where - T: TryInto, - T::Error: Into, + T: TryInto, + T::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(value: VecDeque) -> Result { let values = value .into_iter() .map(|v| v.try_into().map_err(|e| e.into())) - .collect::, RedisError>>()?; + .collect::, Error>>()?; - Ok(RedisValue::Array(values)) + Ok(Value::Array(values)) } } -impl FromIterator for RedisValue +impl FromIterator for Value where - V: Into, + V: Into, { fn from_iter>(iter: I) -> Self { - RedisValue::Array(iter.into_iter().map(|v| v.into()).collect()) + Value::Array(iter.into_iter().map(|v| v.into()).collect()) } } -impl TryFrom> for RedisValue +impl TryFrom> for Value where - K: TryInto, - K::Error: Into, - V: TryInto, - V::Error: Into, + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(d: HashMap) -> Result { - Ok(RedisValue::Map(RedisMap { - inner: utils::into_redis_map(d.into_iter())?, + Ok(Value::Map(Map { + inner: utils::into_map(d.into_iter())?, })) } } -impl TryFrom> for RedisValue +impl TryFrom> for Value where - K: TryInto, - K::Error: Into, - V: TryInto, - V::Error: Into, + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(d: BTreeMap) -> Result { - Ok(RedisValue::Map(RedisMap { - inner: utils::into_redis_map(d.into_iter())?, + Ok(Value::Map(Map { + inner: utils::into_map(d.into_iter())?, })) } } -impl From for RedisValue { - fn from(d: RedisKey) -> Self { - RedisValue::Bytes(d.key) +impl From for Value { + fn from(d: Key) -> Self { + Value::Bytes(d.key) } } -impl From for RedisValue { - fn from(m: RedisMap) -> Self { - RedisValue::Map(m) +impl From for 
Value { + fn from(m: Map) -> Self { + Value::Map(m) } } -impl From<()> for RedisValue { +impl From<()> for Value { fn from(_: ()) -> Self { - RedisValue::Null + Value::Null } } -impl TryFrom for RedisValue { - type Error = RedisError; +impl TryFrom for Value { + type Error = Error; fn try_from(value: Resp3Frame) -> Result { protocol_utils::frame_to_results(value) @@ -1708,23 +1689,22 @@ mod tests { use super::*; #[test] - fn redis_map_from_iter() { - let map = [("hello", "world")].into_iter().collect::(); - assert_eq!(map.inner[&RedisKey::from("hello")], RedisValue::from("world")); + fn map_from_iter() { + let map = [("hello", "world")].into_iter().collect::(); + assert_eq!(map.inner[&Key::from("hello")], Value::from("world")); } - // requires specialization of TryFrom> for RedisValue + // requires specialization of TryFrom> for Value #[test] - #[cfg(feature = "specialize-into-bytes")] - fn redis_bytes_from_vec_u8() { + fn bytes_from_vec_u8() { let input: Vec = vec![0, 1, 2]; - let output: RedisValue = input.clone().try_into().unwrap(); - assert_eq!(output, RedisValue::Bytes(Bytes::from(input))); + let output: Value = input.clone().try_into().unwrap(); + assert_eq!(output, Value::Bytes(Bytes::from(input))); let input: Vec = vec![0, 1, 2, 3]; - let output: RedisValue = input.clone().try_into().unwrap(); + let output: Value = input.clone().try_into().unwrap(); assert_eq!( output, - RedisValue::Array(input.into_iter().map(|v| RedisValue::Integer(v as i64)).collect()) + Value::Array(input.into_iter().map(|v| Value::Integer(v as i64)).collect()) ); } } diff --git a/src/types/builder.rs b/src/types/builder.rs index 2f3d9ace..74f13705 100644 --- a/src/types/builder.rs +++ b/src/types/builder.rs @@ -1,8 +1,8 @@ use crate::{ - clients::{RedisClient, RedisPool}, - error::{RedisError, RedisErrorKind}, + clients::{Client, Pool}, + error::{Error, ErrorKind}, prelude::ReconnectPolicy, - types::{ConnectionConfig, PerformanceConfig, RedisConfig, ServerConfig}, + 
types::config::{Config, ConnectionConfig, PerformanceConfig, ServerConfig}, }; #[cfg(not(feature = "glommio"))] @@ -10,7 +10,7 @@ use crate::clients::ExclusivePool; #[cfg(feature = "subscriber-client")] use crate::clients::SubscriberClient; #[cfg(feature = "sentinel-client")] -use crate::{clients::SentinelClient, types::SentinelConfig}; +use crate::{clients::SentinelClient, types::config::SentinelConfig}; /// A client and pool builder interface. /// @@ -18,18 +18,15 @@ use crate::{clients::SentinelClient, types::SentinelConfig}; /// # use std::time::Duration; /// # use redis_protocol::resp3::types::RespVersion; /// # use fred::prelude::*; -/// fn example() -> Result<(), RedisError> { +/// fn example() -> Result<(), Error> { /// // use default values /// let client = Builder::default_centralized().build()?; /// /// // or initialize from a URL or config -/// let config = RedisConfig::from_url("redis://localhost:6379/1")?; +/// let config = Config::from_url("redis://localhost:6379/1")?; /// let mut builder = Builder::from_config(config); /// // or modify values in place (creating defaults if needed) /// builder -/// .with_performance_config(|config| { -/// config.auto_pipeline = true; -/// }) /// .with_config(|config| { /// config.version = RespVersion::RESP3; /// config.fail_fast = true; @@ -57,7 +54,7 @@ use crate::{clients::SentinelClient, types::SentinelConfig}; /// ``` #[derive(Clone, Debug)] pub struct Builder { - config: Option, + config: Option, performance: PerformanceConfig, connection: ConnectionConfig, policy: Option, @@ -82,7 +79,7 @@ impl Builder { /// Create a new builder instance with default config values for a centralized deployment. pub fn default_centralized() -> Self { Builder { - config: Some(RedisConfig { + config: Some(Config { server: ServerConfig::default_centralized(), ..Default::default() }), @@ -93,7 +90,7 @@ impl Builder { /// Create a new builder instance with default config values for a clustered deployment. 
pub fn default_clustered() -> Self { Builder { - config: Some(RedisConfig { + config: Some(Config { server: ServerConfig::default_clustered(), ..Default::default() }), @@ -102,7 +99,7 @@ impl Builder { } /// Create a new builder instance from the provided client config. - pub fn from_config(config: RedisConfig) -> Self { + pub fn from_config(config: Config) -> Self { Builder { config: Some(config), ..Default::default() @@ -110,7 +107,7 @@ impl Builder { } /// Read the client config. - pub fn get_config(&self) -> Option<&RedisConfig> { + pub fn get_config(&self) -> Option<&Config> { self.config.as_ref() } @@ -132,12 +129,12 @@ impl Builder { /// Read the sentinel client config. #[cfg(feature = "sentinel-client")] #[cfg_attr(docsrs, doc(cfg(feature = "sentinel-client")))] - pub fn get_sentinel_config(&self) -> Option<&RedisConfig> { + pub fn get_sentinel_config(&self) -> Option<&Config> { self.config.as_ref() } /// Overwrite the client config on the builder. - pub fn set_config(&mut self, config: RedisConfig) -> &mut Self { + pub fn set_config(&mut self, config: Config) -> &mut Self { self.config = Some(config); self } @@ -171,12 +168,12 @@ impl Builder { /// Modify the client config in place, creating a new one with default centralized values first if needed. pub fn with_config(&mut self, func: F) -> &mut Self where - F: FnOnce(&mut RedisConfig), + F: FnOnce(&mut Config), { if let Some(config) = self.config.as_mut() { func(config); } else { - let mut config = RedisConfig::default(); + let mut config = Config::default(); func(&mut config); self.config = Some(config); } @@ -221,23 +218,23 @@ impl Builder { } /// Create a new client. 
- pub fn build(&self) -> Result { + pub fn build(&self) -> Result { if let Some(config) = self.config.as_ref() { - Ok(RedisClient::new( + Ok(Client::new( config.clone(), Some(self.performance.clone()), Some(self.connection.clone()), self.policy.clone(), )) } else { - Err(RedisError::new(RedisErrorKind::Config, "Missing client configuration.")) + Err(Error::new(ErrorKind::Config, "Missing client configuration.")) } } /// Create a new client pool. - pub fn build_pool(&self, size: usize) -> Result { + pub fn build_pool(&self, size: usize) -> Result { if let Some(config) = self.config.as_ref() { - RedisPool::new( + Pool::new( config.clone(), Some(self.performance.clone()), Some(self.connection.clone()), @@ -245,13 +242,13 @@ impl Builder { size, ) } else { - Err(RedisError::new(RedisErrorKind::Config, "Missing client configuration.")) + Err(Error::new(ErrorKind::Config, "Missing client configuration.")) } } /// Create a new exclusive client pool. #[cfg(not(feature = "glommio"))] - pub fn build_exclusive_pool(&self, size: usize) -> Result { + pub fn build_exclusive_pool(&self, size: usize) -> Result { if let Some(config) = self.config.as_ref() { ExclusivePool::new( config.clone(), @@ -261,14 +258,14 @@ impl Builder { size, ) } else { - Err(RedisError::new(RedisErrorKind::Config, "Missing client configuration.")) + Err(Error::new(ErrorKind::Config, "Missing client configuration.")) } } /// Create a new subscriber client. 
#[cfg(feature = "subscriber-client")] #[cfg_attr(docsrs, doc(cfg(feature = "subscriber-client")))] - pub fn build_subscriber_client(&self) -> Result { + pub fn build_subscriber_client(&self) -> Result { if let Some(config) = self.config.as_ref() { Ok(SubscriberClient::new( config.clone(), @@ -277,7 +274,7 @@ impl Builder { self.policy.clone(), )) } else { - Err(RedisError::new(RedisErrorKind::Config, "Missing client configuration.")) + Err(Error::new(ErrorKind::Config, "Missing client configuration.")) } } @@ -287,7 +284,7 @@ impl Builder { /// `ServerConfig::Sentinel` to interact with Redis servers behind a sentinel layer. #[cfg(feature = "sentinel-client")] #[cfg_attr(docsrs, doc(cfg(feature = "sentinel-client")))] - pub fn build_sentinel_client(&self) -> Result { + pub fn build_sentinel_client(&self) -> Result { if let Some(config) = self.sentinel.as_ref() { Ok(SentinelClient::new( config.clone(), @@ -296,10 +293,7 @@ impl Builder { self.policy.clone(), )) } else { - Err(RedisError::new( - RedisErrorKind::Config, - "Missing sentinel client configuration.", - )) + Err(Error::new(ErrorKind::Config, "Missing sentinel client configuration.")) } } } diff --git a/src/types/client.rs b/src/types/client.rs index 578de695..a97701d7 100644 --- a/src/types/client.rs +++ b/src/types/client.rs @@ -3,8 +3,8 @@ use bytes_utils::Str; #[cfg(feature = "i-tracking")] use crate::{ - error::{RedisError, RedisErrorKind}, - types::{Message, RedisKey, RedisValue, Server}, + error::{Error, ErrorKind}, + types::{config::Server, Key, Message, Value}, }; /// The type of clients to close. 
@@ -128,30 +128,30 @@ impl Toggle { #[cfg(feature = "i-tracking")] #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] impl TryFrom<&str> for Toggle { - type Error = RedisError; + type Error = Error; fn try_from(value: &str) -> Result { - Toggle::from_str(value).ok_or(RedisError::new(RedisErrorKind::Parse, "Invalid toggle value.")) + Toggle::from_str(value).ok_or(Error::new(ErrorKind::Parse, "Invalid toggle value.")) } } #[cfg(feature = "i-tracking")] #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] impl TryFrom for Toggle { - type Error = RedisError; + type Error = Error; fn try_from(value: String) -> Result { - Toggle::from_str(&value).ok_or(RedisError::new(RedisErrorKind::Parse, "Invalid toggle value.")) + Toggle::from_str(&value).ok_or(Error::new(ErrorKind::Parse, "Invalid toggle value.")) } } #[cfg(feature = "i-tracking")] #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] impl TryFrom<&String> for Toggle { - type Error = RedisError; + type Error = Error; fn try_from(value: &String) -> Result { - Toggle::from_str(value).ok_or(RedisError::new(RedisErrorKind::Parse, "Invalid toggle value.")) + Toggle::from_str(value).ok_or(Error::new(ErrorKind::Parse, "Invalid toggle value.")) } } @@ -172,7 +172,7 @@ impl From for Toggle { #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] #[derive(Clone, Debug, Eq, PartialEq)] pub struct Invalidation { - pub keys: Vec, + pub keys: Vec, pub server: Server, } @@ -182,13 +182,13 @@ impl Invalidation { pub(crate) fn from_message(message: Message, server: &Server) -> Option { Some(Invalidation { keys: match message.value { - RedisValue::Array(values) => values.into_iter().filter_map(|v| v.try_into().ok()).collect(), - RedisValue::String(s) => vec![s.into()], - RedisValue::Bytes(b) => vec![b.into()], - RedisValue::Double(f) => vec![f.into()], - RedisValue::Integer(i) => vec![i.into()], - RedisValue::Boolean(b) => vec![b.into()], - RedisValue::Null => vec![], + Value::Array(values) => values.into_iter().filter_map(|v| 
v.try_into().ok()).collect(), + Value::String(s) => vec![s.into()], + Value::Bytes(b) => vec![b.into()], + Value::Double(f) => vec![f.into()], + Value::Integer(i) => vec![i.into()], + Value::Boolean(b) => vec![b.into()], + Value::Null => vec![], _ => { trace!("Dropping invalid invalidation message."); return None; diff --git a/src/types/cluster.rs b/src/types/cluster.rs index d21f2aad..572b6cb1 100644 --- a/src/types/cluster.rs +++ b/src/types/cluster.rs @@ -1,7 +1,7 @@ pub use crate::protocol::types::{ClusterRouting, SlotRange}; use crate::{ - error::{RedisError, RedisErrorKind}, - types::RedisValue, + error::{Error, ErrorKind}, + types::Value, utils, }; use bytes_utils::Str; @@ -12,10 +12,10 @@ macro_rules! parse_or_zero( } ); -fn parse_cluster_info_line(info: &mut ClusterInfo, line: &str) -> Result<(), RedisError> { +fn parse_cluster_info_line(info: &mut ClusterInfo, line: &str) -> Result<(), Error> { let parts: Vec<&str> = line.split(':').collect(); if parts.len() != 2 { - return Err(RedisError::new(RedisErrorKind::Protocol, "Expected key:value pair.")); + return Err(Error::new(ErrorKind::Protocol, "Expected key:value pair.")); } let (field, val) = (parts[0], parts[1]); @@ -23,7 +23,7 @@ fn parse_cluster_info_line(info: &mut ClusterInfo, line: &str) -> Result<(), Red "cluster_state" => match val { "ok" => info.cluster_state = ClusterState::Ok, "fail" => info.cluster_state = ClusterState::Fail, - _ => return Err(RedisError::new(RedisErrorKind::Protocol, "Invalid cluster state.")), + _ => return Err(Error::new(ErrorKind::Protocol, "Invalid cluster state.")), }, "cluster_slots_assigned" => info.cluster_slots_assigned = parse_or_zero!(val, u16), "cluster_slots_ok" => info.cluster_slots_ok = parse_or_zero!(val, u16), @@ -74,10 +74,10 @@ pub struct ClusterInfo { pub cluster_stats_messages_received: u64, } -impl TryFrom for ClusterInfo { - type Error = RedisError; +impl TryFrom for ClusterInfo { + type Error = Error; - fn try_from(value: RedisValue) -> Result { + fn 
try_from(value: Value) -> Result { if let Some(data) = value.as_bytes_str() { let mut out = ClusterInfo::default(); @@ -89,7 +89,7 @@ impl TryFrom for ClusterInfo { } Ok(out) } else { - Err(RedisError::new(RedisErrorKind::Protocol, "Expected string response.")) + Err(Error::new(ErrorKind::Protocol, "Expected string response.")) } } } diff --git a/src/types/misc.rs b/src/types/common.rs similarity index 79% rename from src/types/misc.rs rename to src/types/common.rs index f0979cbb..f07b98c3 100644 --- a/src/types/misc.rs +++ b/src/types/common.rs @@ -3,13 +3,14 @@ pub use crate::protocol::{ types::{Message, MessageKind}, }; use crate::{ - error::{RedisError, RedisErrorKind}, - types::{RedisKey, RedisValue, Server}, + error::{Error, ErrorKind}, + types::{Key, Value}, utils, }; use bytes_utils::Str; use std::{convert::TryFrom, fmt, time::Duration}; +use crate::prelude::Server; #[cfg(feature = "i-memory")] use crate::utils::convert_or_default; #[cfg(feature = "i-memory")] @@ -33,34 +34,74 @@ impl ShutdownFlags { } } -/// An event on the publish-subscribe interface describing a keyspace notification. +/// The state of the underlying connection to the Redis server. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClientState { + Disconnected, + Disconnecting, + Connected, + Connecting, +} + +impl ClientState { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ClientState::Connecting => "Connecting", + ClientState::Connected => "Connected", + ClientState::Disconnecting => "Disconnecting", + ClientState::Disconnected => "Disconnected", + }) + } +} + +impl fmt::Display for ClientState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.to_str()) + } +} +/// An enum describing the possible ways in which a Redis cluster can change state. 
/// -/// -#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct KeyspaceEvent { - pub db: u8, - pub operation: String, - pub key: RedisKey, +/// See [on_cluster_change](crate::interfaces::EventInterface::on_cluster_change) for more information. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClusterStateChange { + /// A node was added to the cluster. + /// + /// This implies that hash slots were also probably rebalanced. + Add(Server), + /// A node was removed from the cluster. + /// + /// This implies that hash slots were also probably rebalanced. + Remove(Server), + /// Hash slots were rebalanced across the cluster and/or local routing state was updated. + Rebalance, } -/// Aggregate options for the [zinterstore](https://redis.io/commands/zinterstore) (and related) commands. -pub enum AggregateOptions { - Sum, - Min, - Max, +/// Arguments to the CLIENT UNBLOCK command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClientUnblockFlag { + Timeout, + Error, } -impl AggregateOptions { - #[cfg(feature = "i-sorted-sets")] +impl ClientUnblockFlag { pub(crate) fn to_str(&self) -> Str { utils::static_str(match *self { - AggregateOptions::Sum => "SUM", - AggregateOptions::Min => "MIN", - AggregateOptions::Max => "MAX", + ClientUnblockFlag::Timeout => "TIMEOUT", + ClientUnblockFlag::Error => "ERROR", }) } } +/// An event on the publish-subscribe interface describing a keyspace notification. +/// +/// +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct KeyspaceEvent { + pub db: u8, + pub operation: String, + pub key: Key, +} + /// Options for the [info](https://redis.io/commands/info) command. #[derive(Clone, Debug, Eq, PartialEq)] pub enum InfoKind { @@ -139,23 +180,6 @@ impl CustomCommand { } } -/// An enum describing the possible ways in which a Redis cluster can change state. -/// -/// See [on_cluster_change](crate::interfaces::EventInterface::on_cluster_change) for more information. 
-#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClusterStateChange { - /// A node was added to the cluster. - /// - /// This implies that hash slots were also probably rebalanced. - Add(Server), - /// A node was removed from the cluster. - /// - /// This implies that hash slots were also probably rebalanced. - Remove(Server), - /// Hash slots were rebalanced across the cluster and/or local routing state was updated. - Rebalance, -} - /// Options for the [set](https://redis.io/commands/set) command. /// /// @@ -226,32 +250,6 @@ impl Expiration { } } -/// The state of the underlying connection to the Redis server. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClientState { - Disconnected, - Disconnecting, - Connected, - Connecting, -} - -impl ClientState { - pub(crate) fn to_str(&self) -> Str { - utils::static_str(match *self { - ClientState::Connecting => "Connecting", - ClientState::Connected => "Connected", - ClientState::Disconnecting => "Disconnecting", - ClientState::Disconnected => "Disconnected", - }) - } -} - -impl fmt::Display for ClientState { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.to_str()) - } -} - /// The parsed result of the MEMORY STATS command for a specific database. 
/// /// @@ -276,7 +274,7 @@ impl Default for DatabaseMemoryStats { } #[cfg(feature = "i-memory")] -fn parse_database_memory_stat(stats: &mut DatabaseMemoryStats, key: &str, value: RedisValue) { +fn parse_database_memory_stat(stats: &mut DatabaseMemoryStats, key: &str, value: Value) { match key { "overhead.hashtable.main" => stats.overhead_hashtable_main = convert_or_default(value), "overhead.hashtable.expires" => stats.overhead_hashtable_expires = convert_or_default(value), @@ -287,11 +285,11 @@ fn parse_database_memory_stat(stats: &mut DatabaseMemoryStats, key: &str, value: #[cfg(feature = "i-memory")] #[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] -impl TryFrom for DatabaseMemoryStats { - type Error = RedisError; +impl TryFrom for DatabaseMemoryStats { + type Error = Error; - fn try_from(value: RedisValue) -> Result { - let values: HashMap = value.convert()?; + fn try_from(value: Value) -> Result { + let values: HashMap = value.convert()?; let mut out = DatabaseMemoryStats::default(); for (key, value) in values.into_iter() { @@ -405,7 +403,7 @@ impl PartialEq for MemoryStats { impl Eq for MemoryStats {} #[cfg(feature = "i-memory")] -fn parse_memory_stat_field(stats: &mut MemoryStats, key: &str, value: RedisValue) { +fn parse_memory_stat_field(stats: &mut MemoryStats, key: &str, value: Value) { match key { "peak.allocated" => stats.peak_allocated = convert_or_default(value), "total.allocated" => stats.total_allocated = convert_or_default(value), @@ -451,11 +449,11 @@ fn parse_memory_stat_field(stats: &mut MemoryStats, key: &str, value: RedisValue #[cfg(feature = "i-memory")] #[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] -impl TryFrom for MemoryStats { - type Error = RedisError; +impl TryFrom for MemoryStats { + type Error = Error; - fn try_from(value: RedisValue) -> Result { - let values: HashMap = value.convert()?; + fn try_from(value: Value) -> Result { + let values: HashMap = value.convert()?; let mut out = MemoryStats::default(); for (key, value) in 
values.into_iter() { @@ -473,43 +471,39 @@ pub struct SlowlogEntry { pub id: i64, pub timestamp: i64, pub duration: Duration, - pub args: Vec, + pub args: Vec, pub ip: Option, pub name: Option, } -impl TryFrom for SlowlogEntry { - type Error = RedisError; +impl TryFrom for SlowlogEntry { + type Error = Error; - fn try_from(value: RedisValue) -> Result { - if let RedisValue::Array(values) = value { + fn try_from(value: Value) -> Result { + if let Value::Array(values) = value { if values.len() < 4 { - return Err(RedisError::new( - RedisErrorKind::Protocol, - "Expected at least 4 response values.", - )); + return Err(Error::new(ErrorKind::Protocol, "Expected at least 4 response values.")); } let id = values[0] .as_i64() - .ok_or(RedisError::new(RedisErrorKind::Protocol, "Expected integer ID."))?; + .ok_or(Error::new(ErrorKind::Protocol, "Expected integer ID."))?; let timestamp = values[1] .as_i64() - .ok_or(RedisError::new(RedisErrorKind::Protocol, "Expected integer timestamp."))?; + .ok_or(Error::new(ErrorKind::Protocol, "Expected integer timestamp."))?; let duration = values[2] .as_u64() .map(Duration::from_micros) - .ok_or(RedisError::new(RedisErrorKind::Protocol, "Expected integer duration."))?; + .ok_or(Error::new(ErrorKind::Protocol, "Expected integer duration."))?; let args = values[3].clone().into_multiple_values(); let (ip, name) = if values.len() == 6 { let ip = values[4] .as_bytes_str() - .ok_or(RedisError::new(RedisErrorKind::Protocol, "Expected IP address string."))?; - let name = values[5].as_bytes_str().ok_or(RedisError::new( - RedisErrorKind::Protocol, - "Expected client name string.", - ))?; + .ok_or(Error::new(ErrorKind::Protocol, "Expected IP address string."))?; + let name = values[5] + .as_bytes_str() + .ok_or(Error::new(ErrorKind::Protocol, "Expected client name string."))?; (Some(ip), Some(name)) } else { @@ -525,30 +519,11 @@ impl TryFrom for SlowlogEntry { name, }) } else { - Err(RedisError::new_parse("Expected array.")) + 
Err(Error::new_parse("Expected array.")) } } } -/// Flags for the SCRIPT DEBUG command. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ScriptDebugFlag { - Yes, - No, - Sync, -} - -impl ScriptDebugFlag { - #[cfg(feature = "i-scripts")] - pub(crate) fn to_str(&self) -> Str { - utils::static_str(match *self { - ScriptDebugFlag::Yes => "YES", - ScriptDebugFlag::No => "NO", - ScriptDebugFlag::Sync => "SYNC", - }) - } -} - /// Arguments for the `SENTINEL SIMULATE-FAILURE` command. #[derive(Clone, Debug, Eq, PartialEq)] #[cfg(feature = "sentinel-client")] @@ -586,99 +561,3 @@ impl SortOrder { }) } } - -/// The policy type for the [FUNCTION RESTORE](https://redis.io/commands/function-restore/) command. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum FnPolicy { - Flush, - Append, - Replace, -} - -impl Default for FnPolicy { - fn default() -> Self { - FnPolicy::Append - } -} - -impl FnPolicy { - #[cfg(feature = "i-scripts")] - pub(crate) fn to_str(&self) -> Str { - utils::static_str(match *self { - FnPolicy::Flush => "FLUSH", - FnPolicy::Append => "APPEND", - FnPolicy::Replace => "REPLACE", - }) - } - - pub(crate) fn from_str(s: &str) -> Result { - Ok(match s { - "flush" | "FLUSH" => FnPolicy::Flush, - "append" | "APPEND" => FnPolicy::Append, - "replace" | "REPLACE" => FnPolicy::Replace, - _ => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, - "Invalid function restore policy.", - )) - }, - }) - } -} - -// have to implement these for specific types to avoid conflicting with the core Into implementation -impl TryFrom<&str> for FnPolicy { - type Error = RedisError; - - fn try_from(value: &str) -> Result { - FnPolicy::from_str(value) - } -} - -impl TryFrom<&String> for FnPolicy { - type Error = RedisError; - - fn try_from(value: &String) -> Result { - FnPolicy::from_str(value.as_str()) - } -} - -impl TryFrom for FnPolicy { - type Error = RedisError; - - fn try_from(value: String) -> Result { - FnPolicy::from_str(value.as_str()) - } -} - -impl TryFrom for 
FnPolicy { - type Error = RedisError; - - fn try_from(value: Str) -> Result { - FnPolicy::from_str(&value) - } -} - -impl TryFrom<&Str> for FnPolicy { - type Error = RedisError; - - fn try_from(value: &Str) -> Result { - FnPolicy::from_str(value) - } -} - -/// Arguments to the CLIENT UNBLOCK command. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClientUnblockFlag { - Timeout, - Error, -} - -impl ClientUnblockFlag { - pub(crate) fn to_str(&self) -> Str { - utils::static_str(match *self { - ClientUnblockFlag::Timeout => "TIMEOUT", - ClientUnblockFlag::Error => "ERROR", - }) - } -} diff --git a/src/types/config.rs b/src/types/config.rs index 0500ece0..cc884db3 100644 --- a/src/types/config.rs +++ b/src/types/config.rs @@ -1,7 +1,7 @@ pub use crate::protocol::types::Server; use crate::{ - error::{RedisError, RedisErrorKind}, - protocol::command::RedisCommand, + error::{Error, ErrorKind}, + protocol::command::Command, types::{ClusterHash, RespVersion}, utils, }; @@ -106,13 +106,13 @@ pub enum ReconnectPolicy { delay: u32, jitter: u32, }, - /// Backoff reconnection attempts exponentially, multiplying the last delay by `mult` each time. + /// Backoff reconnection attempts exponentially, multiplying the last delay by `base` each time. Exponential { attempts: u32, max_attempts: u32, min_delay: u32, max_delay: u32, - mult: u32, + base: u32, jitter: u32, }, } @@ -151,12 +151,12 @@ impl ReconnectPolicy { } /// Create a new reconnect policy with an exponential backoff. - pub fn new_exponential(max_attempts: u32, min_delay: u32, max_delay: u32, mult: u32) -> ReconnectPolicy { + pub fn new_exponential(max_attempts: u32, min_delay: u32, max_delay: u32, base: u32) -> ReconnectPolicy { ReconnectPolicy::Exponential { max_delay, max_attempts, min_delay, - mult, + base, attempts: 0, jitter: DEFAULT_JITTER_MS, } @@ -196,13 +196,22 @@ impl ReconnectPolicy { /// Read the number of reconnection attempts. 
pub fn attempts(&self) -> u32 { - match *self { + match self { ReconnectPolicy::Constant { ref attempts, .. } => *attempts, ReconnectPolicy::Linear { ref attempts, .. } => *attempts, ReconnectPolicy::Exponential { ref attempts, .. } => *attempts, } } + /// Read the max number of reconnection attempts. + pub fn max_attempts(&self) -> u32 { + match self { + ReconnectPolicy::Constant { ref max_attempts, .. } => *max_attempts, + ReconnectPolicy::Linear { ref max_attempts, .. } => *max_attempts, + ReconnectPolicy::Exponential { ref max_attempts, .. } => *max_attempts, + } + } + /// Whether the client should initiate a reconnect. pub(crate) fn should_reconnect(&self) -> bool { match *self { @@ -260,14 +269,14 @@ impl ReconnectPolicy { min_delay, max_delay, max_attempts, - mult, + base, jitter, } => { *attempts = match utils::incr_with_max(*attempts, max_attempts) { Some(a) => a, None => return None, }; - let delay = (mult as u64) + let delay = (base as u64) .saturating_pow(*attempts - 1) .saturating_mul(min_delay as u64); @@ -295,76 +304,6 @@ impl Default for Blocking { } } -/// Backpressure policies to apply when the max number of in-flight commands is reached on a connection. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum BackpressurePolicy { - /// Sleep for some amount of time before sending the next command. - Sleep { - /// Disable the backpressure scaling logic used to calculate the `sleep` duration when throttling commands. - /// - /// If `true` the client will always wait a constant amount of time defined by `min_sleep_duration_ms` when - /// throttling commands. Otherwise the sleep duration will scale based on the number of in-flight commands. - /// - /// Default: `false` - disable_backpressure_scaling: bool, - /// The minimum amount of time to wait when applying backpressure to a command. - /// - /// If `0` then no backpressure will be applied, but backpressure errors will not be surfaced to callers unless - /// `disable_auto_backpressure` is `true`. 
- /// - /// Default: 10 ms - min_sleep_duration: Duration, - }, - /// Wait for all in-flight commands to finish before sending the next command. - Drain, -} - -impl Default for BackpressurePolicy { - fn default() -> Self { - BackpressurePolicy::Drain - } -} - -impl BackpressurePolicy { - /// Create a new `Sleep` policy with the legacy default values. - pub fn default_sleep() -> Self { - BackpressurePolicy::Sleep { - disable_backpressure_scaling: false, - min_sleep_duration: Duration::from_millis(10), - } - } -} - -/// Configuration options for backpressure features in the client. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct BackpressureConfig { - /// Whether to disable the automatic backpressure features when pipelining is enabled. - /// - /// If `true` then `RedisErrorKind::Backpressure` errors may be surfaced to callers. Callers can set this to `true` - /// and `max_in_flight_commands` to `0` to effectively disable the backpressure logic. - /// - /// Default: `false` - pub disable_auto_backpressure: bool, - /// The maximum number of in-flight commands (per connection) before backpressure will be applied. - /// - /// Default: 10_000 - pub max_in_flight_commands: u64, - /// The backpressure policy to apply when the max number of in-flight commands is reached. - /// - /// Default: [Drain](crate::types::BackpressurePolicy::Drain). - pub policy: BackpressurePolicy, -} - -impl Default for BackpressureConfig { - fn default() -> Self { - BackpressureConfig { - disable_auto_backpressure: false, - max_in_flight_commands: 10_000, - policy: BackpressurePolicy::default(), - } - } -} - /// TCP configuration options. #[derive(Clone, Debug, Default)] pub struct TcpConfig { @@ -393,13 +332,13 @@ pub struct UnresponsiveConfig { /// considered unresponsive. /// /// If a connection is considered unresponsive it will be forcefully closed and the client will reconnect based on - /// the [ReconnectPolicy](crate::types::ReconnectPolicy). 
This heuristic can be useful in environments where - /// connections may close or change in subtle or unexpected ways. + /// the [ReconnectPolicy](crate::types::config::ReconnectPolicy). This heuristic can be useful in environments + /// where connections may close or change in subtle or unexpected ways. /// - /// Unlike the [timeout](crate::types::Options) and [default_command_timeout](crate::types::PerformanceConfig) - /// interfaces, any in-flight commands waiting on a response when the connection is closed this way will be - /// retried based on the associated [ReconnectPolicy](crate::types::ReconnectPolicy) and - /// [Options](crate::types::Options). + /// Unlike the [timeout](crate::types::config::Options) and + /// [default_command_timeout](crate::types::config::PerformanceConfig) interfaces, any in-flight commands waiting + /// on a response when the connection is closed this way will be retried based on the associated + /// [ReconnectPolicy](crate::types::config::ReconnectPolicy) and [Options](crate::types::config::Options). /// /// Default: `None` pub max_timeout: Option, @@ -472,7 +411,7 @@ pub struct ConnectionConfig { /// Unresponsive connection configuration options. pub unresponsive: UnresponsiveConfig, /// An unexpected `NOAUTH` error is treated the same as a general connection failure, causing the client to - /// reconnect based on the [ReconnectPolicy](crate::types::ReconnectPolicy). This is [recommended](https://github.com/StackExchange/StackExchange.Redis/issues/1273#issuecomment-651823824) if callers are using ElastiCache. + /// reconnect based on the [ReconnectPolicy](crate::types::config::ReconnectPolicy). This is [recommended](https://github.com/StackExchange/StackExchange.Redis/issues/1273#issuecomment-651823824) if callers are using ElastiCache. 
/// /// Default: `false` pub reconnect_on_auth_error: bool, @@ -482,7 +421,9 @@ pub struct ConnectionConfig { pub auto_client_setname: bool, /// Limit the size of the internal in-memory command queue. /// - /// Commands that exceed this limit will receive a `RedisErrorKind::Backpressure` error. + /// Commands that exceed this limit will receive a `ErrorKind::Backpressure` error. Setting this value to + /// anything > 0 will indicate that the client should use a bounded MPSC channel to communicate with the routing + /// task. /// /// See [command_queue_len](crate::interfaces::MetricsInterface::command_queue_len) for more information. /// @@ -522,6 +463,7 @@ pub struct ConnectionConfig { impl Default for ConnectionConfig { fn default() -> Self { + #[allow(deprecated)] ConnectionConfig { connection_timeout: Duration::from_millis(10_000), internal_command_timeout: Duration::from_millis(10_000), @@ -553,15 +495,6 @@ impl Default for ConnectionConfig { /// Configuration options that can affect the performance of the client. #[derive(Clone, Debug, Eq, PartialEq)] pub struct PerformanceConfig { - /// Whether the client should automatically pipeline commands across tasks when possible. - /// - /// The [Pipeline](crate::clients::Pipeline) interface can be used to pipeline commands within one task, - /// whereas this flag can automatically pipeline commands across tasks. - /// - /// Default: `true` - pub auto_pipeline: bool, - /// Configuration options for backpressure features in the client. - pub backpressure: BackpressureConfig, /// An optional timeout to apply to all commands. /// /// If `0` this will disable any timeout being applied to commands. 
Callers can also set timeouts on individual @@ -592,13 +525,11 @@ pub struct PerformanceConfig { impl Default for PerformanceConfig { fn default() -> Self { PerformanceConfig { - auto_pipeline: true, - backpressure: BackpressureConfig::default(), - default_command_timeout: Duration::from_millis(0), - max_feed_count: 200, - broadcast_channel_capacity: 32, + default_command_timeout: Duration::from_millis(0), + max_feed_count: 200, + broadcast_channel_capacity: 32, #[cfg(feature = "blocking-encoding")] - blocking_encode_threshold: 50_000_000, + blocking_encode_threshold: 50_000_000, } } } @@ -609,7 +540,7 @@ impl Default for PerformanceConfig { #[cfg_attr(docsrs, doc(cfg(feature = "credential-provider")))] pub trait CredentialProvider: Debug + Send + Sync + 'static { /// Read the username and password that should be used in the next `AUTH` or `HELLO` command. - async fn fetch(&self, server: Option<&Server>) -> Result<(Option, Option), RedisError>; + async fn fetch(&self, server: Option<&Server>) -> Result<(Option, Option), Error>; /// Configure the client to call [fetch](Self::fetch) and send `AUTH` or `HELLO` on some interval. fn refresh_interval(&self) -> Option { @@ -623,7 +554,7 @@ pub trait CredentialProvider: Debug + Send + Sync + 'static { #[cfg_attr(docsrs, doc(cfg(feature = "credential-provider")))] pub trait CredentialProvider: Debug + 'static { /// Read the username and password that should be used in the next `AUTH` or `HELLO` command. - async fn fetch(&self, server: Option<&Server>) -> Result<(Option, Option), RedisError>; + async fn fetch(&self, server: Option<&Server>) -> Result<(Option, Option), Error>; /// Configure the client to call [fetch](Self::fetch) and send `AUTH` or `HELLO` on some interval. fn refresh_interval(&self) -> Option { @@ -631,9 +562,9 @@ pub trait CredentialProvider: Debug + 'static { } } -/// Configuration options for a `RedisClient`. +/// Configuration options for a `Client`. 
#[derive(Clone, Debug)] -pub struct RedisConfig { +pub struct Config { /// Whether the client should return an error if it cannot connect to the server the first time when being /// initialized. If `false` the client will run the reconnect logic if it cannot connect to the server the first /// time, but if `true` the client will return initial connection errors to the caller immediately. @@ -669,7 +600,7 @@ pub struct RedisConfig { pub server: ServerConfig, /// The protocol version to use when communicating with the server(s). /// - /// If RESP3 is specified the client will automatically use `HELLO` when authenticating. **This requires Redis + /// If RESP3 is specified the client will automatically use `HELLO` when authenticating. **This requires version /// 6.0.0 or above.** If the `HELLO` command fails this will prevent the client from connecting. Callers should set /// this to RESP2 and use `HELLO` manually to fall back to RESP2 if needed. /// @@ -718,13 +649,13 @@ pub struct RedisConfig { /// Default: `None` /// /// When used with the `sentinel-auth` feature this interface will take precedence over all `username` and - /// `password` fields for both sentinel nodes and Redis servers. + /// `password` fields for both sentinel nodes and servers. 
#[cfg(feature = "credential-provider")] #[cfg_attr(docsrs, doc(cfg(feature = "credential-provider")))] pub credential_provider: Option>, } -impl PartialEq for RedisConfig { +impl PartialEq for Config { fn eq(&self, other: &Self) -> bool { self.server == other.server && self.database == other.database @@ -736,11 +667,11 @@ impl PartialEq for RedisConfig { } } -impl Eq for RedisConfig {} +impl Eq for Config {} -impl Default for RedisConfig { +impl Default for Config { fn default() -> Self { - RedisConfig { + Config { fail_fast: true, blocking: Blocking::default(), username: None, @@ -765,7 +696,7 @@ impl Default for RedisConfig { } #[cfg_attr(docsrs, allow(rustdoc::broken_intra_doc_links))] -impl RedisConfig { +impl Config { /// Whether the client uses TLS. #[cfg(any( feature = "enable-native-tls", @@ -818,7 +749,7 @@ impl RedisConfig { false } - /// Parse a URL string into a `RedisConfig`. + /// Parse a URL string into a `Config`. /// /// # URL Syntax /// @@ -851,15 +782,15 @@ impl RedisConfig { /// /// This function will use the URL scheme to determine which server type the caller is using. Valid schemes include: /// - /// * `redis` - TCP connected to a centralized server. - /// * `rediss` - TLS connected to a centralized server. - /// * `redis-cluster` - TCP connected to a cluster. - /// * `rediss-cluster` - TLS connected to a cluster. - /// * `redis-sentinel` - TCP connected to a centralized server behind a sentinel layer. - /// * `rediss-sentinel` - TLS connected to a centralized server behind a sentinel layer. - /// * `redis+unix` - Unix domain socket followed by a path. + /// * `redis|valkey` - TCP connected to a centralized server. + /// * `rediss|valkeys` - TLS connected to a centralized server. + /// * `redis-cluster|valkey-cluster` - TCP connected to a cluster. + /// * `rediss-cluster|valkeys-cluster` - TLS connected to a cluster. + /// * `redis-sentinel|valkey-sentinel` - TCP connected to a centralized server behind a sentinel layer. 
+ /// * `rediss-sentinel|valkeys-sentinel` - TLS connected to a centralized server behind a sentinel layer. + /// * `redis+unix|valkey+unix` - Unix domain socket followed by a path. /// - /// **The `rediss` scheme prefix requires one of the TLS feature flags.** + /// **The `rediss|valkeys` scheme prefix requires one of the TLS feature flags.** /// /// # Query Parameters /// @@ -867,39 +798,39 @@ impl RedisConfig { /// example). The following query parameters may also be used in their respective contexts: /// /// * `node` - Specify another node in the topology. In a cluster this would refer to any other known cluster node. - /// In the context of a Redis sentinel layer this refers to a known **sentinel** node. Multiple `node` parameters - /// may be used in a URL. + /// In the context of a sentinel layer this refers to a known **sentinel** node. Multiple `node` parameters may be + /// used in a URL. /// * `sentinelServiceName` - Specify the name of the sentinel service. This is required when using the /// `redis-sentinel` scheme. /// * `sentinelUsername` - Specify the username to use when connecting to a **sentinel** node. This requires the /// `sentinel-auth` feature and allows the caller to use different credentials for sentinel nodes vs the actual - /// Redis server. The `username` part of the URL immediately following the scheme will refer to the username used - /// when connecting to the backing Redis server. + /// server. The `username` part of the URL immediately following the scheme will refer to the username used when + /// connecting to the backing server. /// * `sentinelPassword` - Specify the password to use when connecting to a **sentinel** node. This requires the /// `sentinel-auth` feature and allows the caller to use different credentials for sentinel nodes vs the actual - /// Redis server. The `password` part of the URL immediately following the scheme will refer to the password used - /// when connecting to the backing Redis server. + /// server. 
The `password` part of the URL immediately following the scheme will refer to the password used when + /// connecting to the backing server. /// /// See the [from_url_centralized](Self::from_url_centralized), [from_url_clustered](Self::from_url_clustered), /// [from_url_sentinel](Self::from_url_sentinel), and [from_url_unix](Self::from_url_unix) for more information. Or - /// see the [RedisConfig](Self) unit tests for examples. - pub fn from_url(url: &str) -> Result { + /// see the [Config](Self) unit tests for examples. + pub fn from_url(url: &str) -> Result { let parsed_url = Url::parse(url)?; if utils::url_is_clustered(&parsed_url) { - RedisConfig::from_url_clustered(url) + Config::from_url_clustered(url) } else if utils::url_is_sentinel(&parsed_url) { - RedisConfig::from_url_sentinel(url) + Config::from_url_sentinel(url) } else if utils::url_is_unix_socket(&parsed_url) { #[cfg(feature = "unix-sockets")] - return RedisConfig::from_url_unix(url); + return Config::from_url_unix(url); #[allow(unreachable_code)] - Err(RedisError::new(RedisErrorKind::Config, "Missing unix-socket feature.")) + Err(Error::new(ErrorKind::Config, "Missing unix-socket feature.")) } else { - RedisConfig::from_url_centralized(url) + Config::from_url_centralized(url) } } - /// Create a centralized `RedisConfig` struct from a URL. + /// Create a centralized `Config` struct from a URL. /// /// ```text /// redis://username:password@foo.com:6379/1 @@ -917,13 +848,13 @@ impl RedisConfig { /// * A database can be defined in the `path` section. /// * The `port` field is optional in this context. If it is not specified then `6379` will be used. /// * Any `node` or sentinel query parameters will be ignored. 
- pub fn from_url_centralized(url: &str) -> Result { + pub fn from_url_centralized(url: &str) -> Result { let (url, host, port, _tls) = utils::parse_url(url, Some(6379))?; let server = ServerConfig::new_centralized(host, port); let database = utils::parse_url_db(&url)?; let (username, password) = utils::parse_url_credentials(&url)?; - Ok(RedisConfig { + Ok(Config { server, username, password, @@ -934,11 +865,11 @@ impl RedisConfig { feature = "enable-rustls-ring" ))] tls: utils::tls_config_from_url(_tls)?, - ..RedisConfig::default() + ..Config::default() }) } - /// Create a clustered `RedisConfig` struct from a URL. + /// Create a clustered `Config` struct from a URL. /// /// ```text /// redis-cluster://username:password@foo.com:30001?node=bar.com:30002&node=baz.com:30003 @@ -958,7 +889,7 @@ impl RedisConfig { /// * The `port` field is required in this context alongside any hostname. /// * Any `node` query parameters will be used to find other known cluster nodes. /// * Any sentinel query parameters will be ignored. - pub fn from_url_clustered(url: &str) -> Result { + pub fn from_url_clustered(url: &str) -> Result { let (url, host, port, _tls) = utils::parse_url(url, Some(6379))?; let mut cluster_nodes = utils::parse_url_other_nodes(&url)?; cluster_nodes.push(Server::new(host, port)); @@ -968,7 +899,7 @@ impl RedisConfig { }; let (username, password) = utils::parse_url_credentials(&url)?; - Ok(RedisConfig { + Ok(Config { server, username, password, @@ -978,11 +909,11 @@ impl RedisConfig { feature = "enable-rustls-ring" ))] tls: utils::tls_config_from_url(_tls)?, - ..RedisConfig::default() + ..Config::default() }) } - /// Create a sentinel `RedisConfig` struct from a URL. + /// Create a sentinel `Config` struct from a URL. 
/// /// ```text /// redis-sentinel://username:password@foo.com:6379/1?sentinelServiceName=fakename&node=foo.com:30001&node=bar.com:30002 @@ -1006,16 +937,16 @@ impl RedisConfig { /// * Depending on the cargo features used other sentinel query parameters may be used. /// /// This particular function is more complex than the others when the `sentinel-auth` feature is used. For example, - /// to declare a config that uses different credentials for the sentinel nodes vs the backing Redis servers: + /// to declare a config that uses different credentials for the sentinel nodes vs the backing servers: /// /// ```text /// redis-sentinel://username1:password1@foo.com:26379/1?sentinelServiceName=fakename&sentinelUsername=username2&sentinelPassword=password2&node=bar.com:26379&node=baz.com:26380 /// ``` /// - /// The above example will use `("username1", "password1")` when authenticating to the backing Redis servers, and + /// The above example will use `("username1", "password1")` when authenticating to the backing servers, and /// `("username2", "password2")` when initially connecting to the sentinel nodes. Additionally, all 3 addresses /// (`foo.com:26379`, `bar.com:26379`, `baz.com:26380`) specify known **sentinel** nodes. - pub fn from_url_sentinel(url: &str) -> Result { + pub fn from_url_sentinel(url: &str) -> Result { let (url, host, port, _tls) = utils::parse_url(url, Some(26379))?; let mut other_nodes = utils::parse_url_other_nodes(&url)?; other_nodes.push(Server::new(host, port)); @@ -1031,7 +962,7 @@ impl RedisConfig { password: utils::parse_url_sentinel_password(&url), }; - Ok(RedisConfig { + Ok(Config { server, username, password, @@ -1042,11 +973,11 @@ impl RedisConfig { feature = "enable-rustls-ring" ))] tls: utils::tls_config_from_url(_tls)?, - ..RedisConfig::default() + ..Config::default() }) } - /// Create a `RedisConfig` from a URL that connects via a Unix domain socket. + /// Create a `Config` from a URL that connects via a Unix domain socket. 
/// /// ```text /// redis+unix:///path/to/redis.sock @@ -1057,18 +988,18 @@ impl RedisConfig { /// /// * In the other URL parsing functions the path section indicates the database that the client should `SELECT` /// after connecting. However, Unix sockets are also specified by a path rather than a hostname:port, which - /// creates some ambiguity in this case. Callers should manually set the database field on the returned - /// `RedisConfig` if needed. + /// creates some ambiguity in this case. Callers should manually set the database field on the returned `Config` + /// if needed. /// * If credentials are provided the caller must also specify a hostname in order to pass to the [URL /// validation](Url::parse) process. This function will ignore the value, but some non-empty string must be /// provided. #[cfg(feature = "unix-sockets")] #[cfg_attr(docsrs, doc(cfg(feature = "unix-sockets")))] - pub fn from_url_unix(url: &str) -> Result { + pub fn from_url_unix(url: &str) -> Result { let (url, path) = utils::parse_unix_url(url)?; let (username, password) = utils::parse_url_credentials(&url)?; - Ok(RedisConfig { + Ok(Config { server: ServerConfig::Unix { path }, username, password, @@ -1077,7 +1008,7 @@ impl RedisConfig { } } -/// Connection configuration for the Redis server. +/// Connection configuration for the server. #[derive(Clone, Debug, Eq, PartialEq)] pub enum ServerConfig { Centralized { @@ -1098,7 +1029,7 @@ pub enum ServerConfig { Unix { /// The path to the Unix socket. /// - /// Any associated [Server](crate::types::Server) identifiers will use this value as the `host`. + /// Any associated [Server](crate::types::config::Server) identifiers will use this value as the `host`. path: PathBuf, }, Sentinel { @@ -1184,7 +1115,8 @@ impl ServerConfig { } } - /// Create a clustered config with the same defaults as specified in the `create-cluster` script provided by Redis. 
+ /// Create a clustered config with the same defaults as specified in the `create-cluster` script provided by Redis + /// or Valkey. pub fn default_clustered() -> ServerConfig { ServerConfig::Clustered { hosts: vec![ @@ -1232,12 +1164,12 @@ impl ServerConfig { } /// Set the [ClusterDiscoveryPolicy], if possible. - pub fn set_cluster_discovery_policy(&mut self, new_policy: ClusterDiscoveryPolicy) -> Result<(), RedisError> { + pub fn set_cluster_discovery_policy(&mut self, new_policy: ClusterDiscoveryPolicy) -> Result<(), Error> { if let ServerConfig::Clustered { ref mut policy, .. } = self { *policy = new_policy; Ok(()) } else { - Err(RedisError::new(RedisErrorKind::Config, "Expected clustered config.")) + Err(Error::new(ErrorKind::Config, "Expected clustered config.")) } } } @@ -1360,9 +1292,9 @@ impl Default for SentinelConfig { #[doc(hidden)] #[cfg(feature = "sentinel-client")] -impl From for RedisConfig { +impl From for Config { fn from(config: SentinelConfig) -> Self { - RedisConfig { + Config { server: ServerConfig::Centralized { server: Server::new(config.host, config.port), }, @@ -1394,14 +1326,14 @@ impl From for RedisConfig { /// /// ```rust /// # use fred::prelude::*; -/// async fn example() -> Result<(), RedisError> { +/// async fn example() -> Result<(), Error> { /// let options = Options { /// max_attempts: Some(10), /// max_redirections: Some(2), /// ..Default::default() /// }; /// -/// let client = RedisClient::default(); +/// let client = Client::default(); /// client.init().await?; /// let _: () = client.with_options(&options).get("foo").await?; /// @@ -1425,7 +1357,7 @@ pub struct Options { pub timeout: Option, /// The cluster node that should receive the command. /// - /// The caller will receive a `RedisErrorKind::Cluster` error if the provided server does not exist. + /// The caller will receive a `ErrorKind::Cluster` error if the provided server does not exist. /// /// The client will still follow redirection errors via this interface. 
Callers may not notice this, but incorrect /// server arguments here could result in unnecessary calls to refresh the cached cluster routing table. @@ -1434,8 +1366,6 @@ pub struct Options { /// /// If `cluster_node` is also provided it will take precedence over this value. pub cluster_hash: Option, - /// Whether to skip backpressure checks for a command. - pub no_backpressure: bool, /// Whether the command should fail quickly if the connection is not healthy or available for writes. This always /// takes precedence over `max_attempts` if `true`. /// @@ -1468,7 +1398,6 @@ impl Options { if let Some(ref cluster_hash) = other.cluster_hash { self.cluster_hash = Some(cluster_hash.clone()); } - self.no_backpressure |= other.no_backpressure; self.fail_fast |= other.fail_fast; #[cfg(feature = "i-tracking")] @@ -1481,12 +1410,11 @@ impl Options { /// Create options from a command #[cfg(feature = "transactions")] - pub(crate) fn from_command(cmd: &RedisCommand) -> Self { + pub(crate) fn from_command(cmd: &Command) -> Self { Options { max_attempts: Some(cmd.attempts_remaining), max_redirections: Some(cmd.redirections_remaining), timeout: cmd.timeout_dur, - no_backpressure: cmd.skip_backpressure, cluster_node: cmd.cluster_node.clone(), cluster_hash: Some(cmd.hasher.clone()), fail_fast: cmd.fail_fast, @@ -1496,8 +1424,7 @@ impl Options { } /// Overwrite the configuration options on the provided command. 
- pub(crate) fn apply(&self, command: &mut RedisCommand) { - command.skip_backpressure = self.no_backpressure; + pub(crate) fn apply(&self, command: &mut Command) { command.timeout_dur = self.timeout; command.cluster_node = self.cluster_node.clone(); command.fail_fast = self.fail_fast; @@ -1522,69 +1449,69 @@ impl Options { #[cfg(test)] mod tests { #[cfg(feature = "sentinel-auth")] - use crate::types::Server; + use crate::types::config::Server; #[allow(unused_imports)] - use crate::{prelude::ServerConfig, types::RedisConfig, utils}; + use crate::{prelude::ServerConfig, types::config::Config, utils}; #[test] fn should_parse_centralized_url() { let url = "redis://username:password@foo.com:6379/1"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::new_centralized("foo.com", 6379), database: Some(1), username: Some("username".into()), password: Some("password".into()), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_centralized(url).unwrap(); + let actual = Config::from_url_centralized(url).unwrap(); assert_eq!(actual, expected); } #[test] fn should_parse_centralized_url_without_port() { let url = "redis://foo.com"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::new_centralized("foo.com", 6379), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_centralized(url).unwrap(); + let actual = Config::from_url_centralized(url).unwrap(); assert_eq!(actual, expected); } #[test] fn should_parse_centralized_url_without_creds() { let url = "redis://foo.com:6379/1"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::new_centralized("foo.com", 6379), database: 
Some(1), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_centralized(url).unwrap(); + let actual = Config::from_url_centralized(url).unwrap(); assert_eq!(actual, expected); } #[test] fn should_parse_centralized_url_without_db() { let url = "redis://username:password@foo.com:6379"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::new_centralized("foo.com", 6379), username: Some("username".into()), password: Some("password".into()), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_centralized(url).unwrap(); + let actual = Config::from_url_centralized(url).unwrap(); assert_eq!(actual, expected); } @@ -1592,79 +1519,79 @@ mod tests { #[cfg(feature = "enable-native-tls")] fn should_parse_centralized_url_with_tls() { let url = "rediss://username:password@foo.com:6379/1"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::new_centralized("foo.com", 6379), database: Some(1), username: Some("username".into()), password: Some("password".into()), tls: utils::tls_config_from_url(true).unwrap(), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_centralized(url).unwrap(); + let actual = Config::from_url_centralized(url).unwrap(); assert_eq!(actual, expected); } #[test] fn should_parse_clustered_url() { let url = "redis-cluster://username:password@foo.com:30000"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::new_clustered(vec![("foo.com", 30000)]), username: Some("username".into()), password: 
Some("password".into()), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_clustered(url).unwrap(); + let actual = Config::from_url_clustered(url).unwrap(); assert_eq!(actual, expected); } #[test] fn should_parse_clustered_url_without_port() { let url = "redis-cluster://foo.com"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::new_clustered(vec![("foo.com", 6379)]), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_clustered(url).unwrap(); + let actual = Config::from_url_clustered(url).unwrap(); assert_eq!(actual, expected); } #[test] fn should_parse_clustered_url_without_creds() { let url = "redis-cluster://foo.com:30000"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::new_clustered(vec![("foo.com", 30000)]), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_clustered(url).unwrap(); + let actual = Config::from_url_clustered(url).unwrap(); assert_eq!(actual, expected); } #[test] fn should_parse_clustered_url_with_other_nodes() { let url = "redis-cluster://username:password@foo.com:30000?node=bar.com:30001&node=baz.com:30002"; - let expected = RedisConfig { + let expected = Config { // need to be careful with the array ordering here server: ServerConfig::new_clustered(vec![("bar.com", 30001), ("baz.com", 30002), ("foo.com", 30000)]), username: Some("username".into()), password: Some("password".into()), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let 
actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_clustered(url).unwrap(); + let actual = Config::from_url_clustered(url).unwrap(); assert_eq!(actual, expected); } @@ -1672,34 +1599,34 @@ mod tests { #[cfg(feature = "enable-native-tls")] fn should_parse_clustered_url_with_tls() { let url = "rediss-cluster://username:password@foo.com:30000"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::new_clustered(vec![("foo.com", 30000)]), username: Some("username".into()), password: Some("password".into()), tls: utils::tls_config_from_url(true).unwrap(), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_clustered(url).unwrap(); + let actual = Config::from_url_clustered(url).unwrap(); assert_eq!(actual, expected); } #[test] fn should_parse_sentinel_url() { let url = "redis-sentinel://username:password@foo.com:26379/1?sentinelServiceName=fakename"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::new_sentinel(vec![("foo.com", 26379)], "fakename"), username: Some("username".into()), password: Some("password".into()), database: Some(1), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_sentinel(url).unwrap(); + let actual = Config::from_url_sentinel(url).unwrap(); assert_eq!(actual, expected); } @@ -1707,7 +1634,7 @@ mod tests { fn should_parse_sentinel_url_with_other_nodes() { let url = "redis-sentinel://username:password@foo.com:26379/1?sentinelServiceName=fakename&node=bar.com:26380&\ node=baz.com:26381"; - let expected = RedisConfig { + let expected = Config { // also need to be careful with array ordering here server: 
ServerConfig::new_sentinel( vec![("bar.com", 26380), ("baz.com", 26381), ("foo.com", 26379)], @@ -1716,12 +1643,12 @@ mod tests { username: Some("username".into()), password: Some("password".into()), database: Some(1), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_sentinel(url).unwrap(); + let actual = Config::from_url_sentinel(url).unwrap(); assert_eq!(actual, expected); } @@ -1729,7 +1656,7 @@ mod tests { #[cfg(feature = "unix-sockets")] fn should_parse_unix_socket_url_no_auth() { let url = "redis+unix:///path/to/redis.sock"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::Unix { path: "/path/to/redis.sock".into(), }, @@ -1738,9 +1665,9 @@ mod tests { ..Default::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_unix(url).unwrap(); + let actual = Config::from_url_unix(url).unwrap(); assert_eq!(actual, expected); } @@ -1748,7 +1675,7 @@ mod tests { #[cfg(feature = "unix-sockets")] fn should_parse_unix_socket_url_with_auth() { let url = "redis+unix://username:password@foo/path/to/redis.sock"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::Unix { path: "/path/to/redis.sock".into(), }, @@ -1757,9 +1684,9 @@ mod tests { ..Default::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_unix(url).unwrap(); + let actual = Config::from_url_unix(url).unwrap(); assert_eq!(actual, expected); } @@ -1767,18 +1694,18 @@ mod tests { #[cfg(feature = "enable-native-tls")] fn should_parse_sentinel_url_with_tls() { let url = 
"rediss-sentinel://username:password@foo.com:26379/1?sentinelServiceName=fakename"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::new_sentinel(vec![("foo.com", 26379)], "fakename"), username: Some("username".into()), password: Some("password".into()), database: Some(1), tls: utils::tls_config_from_url(true).unwrap(), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_sentinel(url).unwrap(); + let actual = Config::from_url_sentinel(url).unwrap(); assert_eq!(actual, expected); } @@ -1787,7 +1714,7 @@ mod tests { fn should_parse_sentinel_url_with_sentinel_auth() { let url = "redis-sentinel://username1:password1@foo.com:26379/1?sentinelServiceName=fakename&\ sentinelUsername=username2&sentinelPassword=password2"; - let expected = RedisConfig { + let expected = Config { server: ServerConfig::Sentinel { hosts: vec![Server::new("foo.com", 26379)], service_name: "fakename".into(), @@ -1797,12 +1724,12 @@ mod tests { username: Some("username1".into()), password: Some("password1".into()), database: Some(1), - ..RedisConfig::default() + ..Config::default() }; - let actual = RedisConfig::from_url(url).unwrap(); + let actual = Config::from_url(url).unwrap(); assert_eq!(actual, expected); - let actual = RedisConfig::from_url_sentinel(url).unwrap(); + let actual = Config::from_url_sentinel(url).unwrap(); assert_eq!(actual, expected); } } diff --git a/src/types/from_tuple.rs b/src/types/from_tuple.rs index eaf203c7..3d4bfc91 100644 --- a/src/types/from_tuple.rs +++ b/src/types/from_tuple.rs @@ -1,14 +1,14 @@ -use crate::types::{MultipleKeys, RedisKey, RedisValue}; +use crate::types::{Key, MultipleKeys, Value}; macro_rules! 
tuple2val { ($($id:tt $ty:ident);+) => { -impl<$($ty: Into),+ > From<($($ty),+)> for RedisValue { +impl<$($ty: Into),+ > From<($($ty),+)> for Value { fn from(value: ($($ty),+)) -> Self { - RedisValue::Array(vec![$(value.$id.into()),+]) + Value::Array(vec![$(value.$id.into()),+]) } } -impl<$($ty: Into),+ > From<($($ty),+)> for MultipleKeys { +impl<$($ty: Into),+ > From<($($ty),+)> for MultipleKeys { fn from(value: ($($ty),+)) -> Self { Self{keys:vec![$(value.$id.into()),+]} } diff --git a/src/types/geo.rs b/src/types/geo.rs index 02ac4378..7483d6c0 100644 --- a/src/types/geo.rs +++ b/src/types/geo.rs @@ -1,4 +1,4 @@ -use crate::{error::RedisError, protocol::utils as protocol_utils, types::RedisValue, utils}; +use crate::{error::Error, protocol::utils as protocol_utils, types::Value, utils}; use bytes_utils::Str; use std::{ collections::VecDeque, @@ -29,10 +29,10 @@ impl From<(f64, f64)> for GeoPosition { } } -impl TryFrom for GeoPosition { - type Error = RedisError; +impl TryFrom for GeoPosition { + type Error = Error; - fn try_from(value: RedisValue) -> Result { + fn try_from(value: Value) -> Result { let (longitude, latitude): (f64, f64) = value.convert()?; Ok(GeoPosition { longitude, latitude }) } @@ -62,15 +62,15 @@ impl GeoUnit { #[derive(Debug, Clone, Eq, PartialEq)] pub struct GeoValue { pub coordinates: GeoPosition, - pub member: RedisValue, + pub member: Value, } impl TryFrom<(f64, f64, T)> for GeoValue where - T: TryInto, - T::Error: Into, + T: TryInto, + T::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(v: (f64, f64, T)) -> Result { Ok(GeoValue { @@ -122,7 +122,7 @@ impl From> for MultipleGeoValues { /// A typed struct representing the full output of the GEORADIUS (or similar) command. 
#[derive(Clone, Debug)] pub struct GeoRadiusInfo { - pub member: RedisValue, + pub member: Value, pub position: Option, pub distance: Option, pub hash: Option, @@ -131,7 +131,7 @@ pub struct GeoRadiusInfo { impl Default for GeoRadiusInfo { fn default() -> Self { GeoRadiusInfo { - member: RedisValue::Null, + member: Value::Null, position: None, distance: None, hash: None, @@ -152,13 +152,8 @@ impl Eq for GeoRadiusInfo {} impl GeoRadiusInfo { /// Parse the value with context from the calling command. - pub fn from_redis_value( - value: RedisValue, - withcoord: bool, - withdist: bool, - withhash: bool, - ) -> Result { - if let RedisValue::Array(mut data) = value { + pub fn from_value(value: Value, withcoord: bool, withdist: bool, withhash: bool) -> Result { + if let Value::Array(mut data) = value { let mut out = GeoRadiusInfo::default(); data.reverse(); diff --git a/src/types/mod.rs b/src/types/mod.rs index 1148edea..310ca01f 100644 --- a/src/types/mod.rs +++ b/src/types/mod.rs @@ -1,70 +1,62 @@ -pub use crate::modules::response::{FromRedis, FromRedisKey}; -use crate::{error::RedisError, runtime::JoinHandle}; +pub use crate::modules::response::{FromKey, FromValue}; +use crate::{error::Error, runtime::JoinHandle}; pub use redis_protocol::resp3::types::{BytesFrame as Resp3Frame, RespVersion}; mod args; mod builder; -#[cfg(feature = "i-client")] -mod client; -#[cfg(feature = "i-cluster")] -mod cluster; -mod config; -mod from_tuple; -#[cfg(feature = "i-geo")] -mod geo; -#[cfg(feature = "i-lists")] -mod lists; -mod misc; -mod multiple; -#[cfg(feature = "i-redisearch")] -mod redisearch; -mod scan; -#[cfg(feature = "i-scripts")] -mod scripts; -#[cfg(feature = "i-sorted-sets")] -mod sorted_sets; -#[cfg(feature = "i-streams")] -mod streams; -#[cfg(feature = "i-time-series")] -mod timeseries; -#[cfg(feature = "metrics")] -#[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] -pub use crate::modules::metrics::Stats; -pub use args::*; -pub use builder::*; +/// Types used to inspect 
or operate on client connections. #[cfg(feature = "i-client")] #[cfg_attr(docsrs, doc(cfg(feature = "i-client")))] -pub use client::*; +pub mod client; +/// Types used to inspect or operate on clusters or cluster connections. #[cfg(feature = "i-cluster")] #[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] -pub use cluster::*; -pub use config::*; +pub mod cluster; +mod common; +/// Types used to configure clients or commands. +pub mod config; +mod from_tuple; +/// Types used with the `GEO` interface. #[cfg(feature = "i-geo")] #[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] -pub use geo::*; +pub mod geo; +/// Types used with the lists interface. #[cfg(feature = "i-lists")] #[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] -pub use lists::*; -pub use misc::*; -pub use multiple::*; +pub mod lists; +mod multiple; +/// Types used with the `i-redisearch` interface. #[cfg(feature = "i-redisearch")] #[cfg_attr(docsrs, doc(cfg(feature = "i-redisearch")))] -pub use redisearch::*; -pub use scan::*; +pub mod redisearch; +/// Types used to scan servers. +pub mod scan; +/// Types related to Lua scripts or functions. #[cfg(feature = "i-scripts")] #[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] -pub use scripts::*; -pub use semver::Version; +pub mod scripts; +/// Types used in the sorted sets interface. #[cfg(feature = "i-sorted-sets")] #[cfg_attr(docsrs, doc(cfg(feature = "i-sorted-sets")))] -pub use sorted_sets::*; +pub mod sorted_sets; +/// Types used in the streams interface. #[cfg(feature = "i-streams")] #[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] -pub use streams::*; +pub mod streams; +/// Types used with the `i-time-series` interface. 
#[cfg(feature = "i-time-series")] #[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] -pub use timeseries::*; +pub mod timeseries; + +#[cfg(feature = "metrics")] +#[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] +pub use crate::modules::metrics::Stats; +pub use args::*; +pub use builder::*; +pub use common::*; +pub use multiple::*; +pub use semver::Version; #[cfg(feature = "dns")] #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] @@ -75,7 +67,7 @@ pub(crate) static QUEUED: &str = "QUEUED"; /// The ANY flag used on certain GEO commands. pub type Any = bool; /// The result from any of the `connect` functions showing the error that closed the connection, if any. -pub type ConnectHandle = JoinHandle>; +pub type ConnectHandle = JoinHandle>; /// A tuple of `(offset, count)` values for commands that allow paging through results. pub type Limit = (i64, i64); /// An argument type equivalent to "[LIMIT count]". diff --git a/src/types/multiple.rs b/src/types/multiple.rs index 16708c8b..db2628d7 100644 --- a/src/types/multiple.rs +++ b/src/types/multiple.rs @@ -1,4 +1,4 @@ -use crate::types::{RedisKey, RedisValue}; +use crate::types::{Key, Value}; use std::{collections::VecDeque, iter::FromIterator}; /// Convenience struct for commands that take 1 or more keys. @@ -7,7 +7,7 @@ use std::{collections::VecDeque, iter::FromIterator}; /// `Into`.** This is mostly useful for `EVAL` and `EVALSHA`. 
#[derive(Clone, Debug, Eq, PartialEq)] pub struct MultipleKeys { - pub(crate) keys: Vec, + pub(crate) keys: Vec, } impl MultipleKeys { @@ -15,11 +15,11 @@ impl MultipleKeys { MultipleKeys { keys: Vec::new() } } - pub fn inner(self) -> Vec { + pub fn inner(self) -> Vec { self.keys } - pub fn into_values(self) -> Vec { + pub fn into_values(self) -> Vec { self.keys.into_iter().map(|k| k.into()).collect() } @@ -28,8 +28,8 @@ impl MultipleKeys { } } -impl From> for MultipleKeys { - fn from(key: Option) -> Self { +impl From> for MultipleKeys { + fn from(key: Option) -> Self { let keys = if let Some(key) = key { vec![key] } else { vec![] }; MultipleKeys { keys } } @@ -37,7 +37,7 @@ impl From> for MultipleKeys { impl From for MultipleKeys where - T: Into, + T: Into, { fn from(d: T) -> Self { MultipleKeys { keys: vec![d.into()] } @@ -46,7 +46,7 @@ where impl FromIterator for MultipleKeys where - T: Into, + T: Into, { fn from_iter>(iter: I) -> Self { MultipleKeys { @@ -57,7 +57,7 @@ where impl<'a, K, const N: usize> From<&'a [K; N]> for MultipleKeys where - K: Into + Clone, + K: Into + Clone, { fn from(value: &'a [K; N]) -> Self { MultipleKeys { @@ -68,7 +68,7 @@ where impl From> for MultipleKeys where - T: Into, + T: Into, { fn from(d: Vec) -> Self { MultipleKeys { @@ -79,7 +79,7 @@ where impl From> for MultipleKeys where - T: Into, + T: Into, { fn from(d: VecDeque) -> Self { MultipleKeys { @@ -98,7 +98,7 @@ impl From<()> for MultipleKeys { pub type MultipleStrings = MultipleKeys; /// Convenience interface for commands that take 1 or more values. -pub type MultipleValues = RedisValue; +pub type MultipleValues = Value; /// A convenience struct for functions that take one or more hash slot values. 
pub struct MultipleHashSlots { diff --git a/src/types/redisearch.rs b/src/types/redisearch.rs index 081080b4..23ed4b9e 100644 --- a/src/types/redisearch.rs +++ b/src/types/redisearch.rs @@ -1,5 +1,12 @@ use crate::{ - types::{GeoPosition, GeoUnit, Limit, RedisKey, RedisValue, SortOrder, ZRange}, + types::{ + geo::{GeoPosition, GeoUnit}, + sorted_sets::ZRange, + Key, + Limit, + SortOrder, + Value, + }, utils, }; use bytes::Bytes; @@ -191,7 +198,7 @@ impl FtAggregateOptions { /// Arguments for `FILTER` in `FT.SEARCH`. /// -/// Callers should use the `*Score*` variants on any provided [ZRange](crate::types::ZRange) values. +/// Callers should use the `*Score*` variants on any provided [ZRange](crate::types::sorted_sets::ZRange) values. #[derive(Clone, Debug)] pub struct SearchFilter { pub attribute: Str, @@ -204,7 +211,7 @@ pub struct SearchFilter { pub struct SearchGeoFilter { pub attribute: Str, pub position: GeoPosition, - pub radius: RedisValue, + pub radius: Value, pub units: GeoUnit, } @@ -247,7 +254,7 @@ pub struct FtSearchOptions { pub withsortkeys: bool, pub filters: Vec, pub geofilters: Vec, - pub inkeys: Vec, + pub inkeys: Vec, pub infields: Vec, pub r#return: Vec, pub summarize: Option, @@ -432,7 +439,7 @@ pub enum SearchSchemaKind { }, Custom { name: Str, - arguments: Vec, + arguments: Vec, }, } diff --git a/src/types/scan.rs b/src/types/scan.rs index 08e84828..f4cdf575 100644 --- a/src/types/scan.rs +++ b/src/types/scan.rs @@ -1,15 +1,14 @@ use crate::{ - clients::RedisClient, - error::RedisError, + clients::Client, interfaces, - modules::inner::RedisClientInner, + modules::inner::ClientInner, protocol::{ - command::{RedisCommand, RedisCommandKind}, + command::{Command, CommandKind}, responders::ResponseKind, types::{KeyScanInner, ValueScanInner}, }, runtime::RefCount, - types::{RedisKey, RedisMap, RedisValue}, + types::{Key, Map, Value}, utils, }; use bytes_utils::Str; @@ -61,40 +60,39 @@ pub trait Scanner { /// afterwards. 
fn take_results(&mut self) -> Option; - /// A lightweight function to create a Redis client from the SCAN result. + /// A lightweight function to create a client from the SCAN result. /// /// To continue scanning the caller should call `next` on this struct. Calling `scan` again on the client will /// initiate a new SCAN call starting with a cursor of 0. - fn create_client(&self) -> RedisClient; + fn create_client(&self) -> Client; /// Move on to the next page of results from the SCAN operation. If no more results are available this may close the - /// stream. + /// stream. This interface provides a mechanism for throttling the throughput of the SCAN call /// /// If callers do not call this function the scanning will continue when this struct is dropped. Results are not /// automatically scanned in the background since this could cause the buffer backing the stream to grow too large - /// very quickly. This interface provides a mechanism for throttling the throughput of the SCAN call. Callers can - /// use [scan_buffered](crate::clients::RedisClient::scan_buffered) or - /// [scan_cluster_buffered](crate::clients::RedisClient::scan_cluster_buffered) to automatically continue scanning + /// very quickly. Callers can use [scan_buffered](crate::clients::Client::scan_buffered) or + /// [scan_cluster_buffered](crate::clients::Client::scan_cluster_buffered) to automatically continue scanning /// in the background. - /// - /// If this function returns an error the scan call cannot continue as the client has been closed, or some other - /// fatal error has occurred. If this happens the error will appear in the stream from the original SCAN call. - fn next(self) -> Result<(), RedisError>; + fn next(self); + + /// Stop the scanning process, ending the outer stream. + fn cancel(self); } /// The result of a SCAN operation. 
pub struct ScanResult { - pub(crate) results: Option>, - pub(crate) inner: RefCount, + pub(crate) results: Option>, + pub(crate) inner: RefCount, pub(crate) scan_state: Option, pub(crate) can_continue: bool, } -fn next_key_page(inner: &RefCount, state: &mut Option) { +fn next_key_page(inner: &RefCount, state: &mut Option) { if let Some(state) = state.take() { let cluster_node = state.server.clone(); let response = ResponseKind::KeyScan(state); - let mut cmd: RedisCommand = (RedisCommandKind::Scan, Vec::new(), response).into(); + let mut cmd: Command = (CommandKind::Scan, Vec::new(), response).into(); cmd.cluster_node = cluster_node; let _ = interfaces::default_send_command(inner, cmd); @@ -110,7 +108,7 @@ impl Drop for ScanResult { } impl Scanner for ScanResult { - type Page = Vec; + type Page = Vec; fn cursor(&self) -> Option> { if let Some(ref state) = self.scan_state { @@ -132,36 +130,38 @@ impl Scanner for ScanResult { self.results.take() } - fn create_client(&self) -> RedisClient { - RedisClient { + fn create_client(&self) -> Client { + Client { inner: self.inner.clone(), } } - /// TODO remove Result wrapper in next major version - fn next(self) -> Result<(), RedisError> { + fn next(self) { if !self.can_continue { - return Ok(()); + return; } let mut _self = self; next_key_page(&_self.inner, &mut _self.scan_state); - Ok(()) + } + + fn cancel(mut self) { + let _ = self.scan_state.take(); } } /// The result of a HSCAN operation. 
pub struct HScanResult { - pub(crate) results: Option, - pub(crate) inner: RefCount, + pub(crate) results: Option, + pub(crate) inner: RefCount, pub(crate) scan_state: Option, pub(crate) can_continue: bool, } -fn next_hscan_page(inner: &RefCount, state: &mut Option) { +fn next_hscan_page(inner: &RefCount, state: &mut Option) { if let Some(state) = state.take() { let response = ResponseKind::ValueScan(state); - let cmd: RedisCommand = (RedisCommandKind::Hscan, Vec::new(), response).into(); + let cmd: Command = (CommandKind::Hscan, Vec::new(), response).into(); let _ = interfaces::default_send_command(inner, cmd); } } @@ -175,7 +175,7 @@ impl Drop for HScanResult { } impl Scanner for HScanResult { - type Page = RedisMap; + type Page = Map; fn cursor(&self) -> Option> { if let Some(ref state) = self.scan_state { @@ -197,35 +197,38 @@ impl Scanner for HScanResult { self.results.take() } - fn create_client(&self) -> RedisClient { - RedisClient { + fn create_client(&self) -> Client { + Client { inner: self.inner.clone(), } } - fn next(self) -> Result<(), RedisError> { + fn next(self) { if !self.can_continue { - return Ok(()); + return; } let mut _self = self; next_hscan_page(&_self.inner, &mut _self.scan_state); - Ok(()) + } + + fn cancel(mut self) { + let _ = self.scan_state.take(); } } /// The result of a SSCAN operation. 
pub struct SScanResult { - pub(crate) results: Option>, - pub(crate) inner: RefCount, + pub(crate) results: Option>, + pub(crate) inner: RefCount, pub(crate) scan_state: Option, pub(crate) can_continue: bool, } -fn next_sscan_page(inner: &RefCount, state: &mut Option) { +fn next_sscan_page(inner: &RefCount, state: &mut Option) { if let Some(state) = state.take() { let response = ResponseKind::ValueScan(state); - let cmd: RedisCommand = (RedisCommandKind::Sscan, Vec::new(), response).into(); + let cmd: Command = (CommandKind::Sscan, Vec::new(), response).into(); let _ = interfaces::default_send_command(inner, cmd); } } @@ -239,7 +242,7 @@ impl Drop for SScanResult { } impl Scanner for SScanResult { - type Page = Vec; + type Page = Vec; fn cursor(&self) -> Option> { if let Some(ref state) = self.scan_state { @@ -261,35 +264,38 @@ impl Scanner for SScanResult { self.can_continue } - fn create_client(&self) -> RedisClient { - RedisClient { + fn create_client(&self) -> Client { + Client { inner: self.inner.clone(), } } - fn next(self) -> Result<(), RedisError> { + fn next(self) { if !self.can_continue { - return Ok(()); + return; } let mut _self = self; next_sscan_page(&_self.inner, &mut _self.scan_state); - Ok(()) + } + + fn cancel(mut self) { + let _ = self.scan_state.take(); } } /// The result of a ZSCAN operation. 
pub struct ZScanResult { - pub(crate) results: Option>, - pub(crate) inner: RefCount, + pub(crate) results: Option>, + pub(crate) inner: RefCount, pub(crate) scan_state: Option, pub(crate) can_continue: bool, } -fn next_zscan_page(inner: &RefCount, state: &mut Option) { +fn next_zscan_page(inner: &RefCount, state: &mut Option) { if let Some(state) = state.take() { let response = ResponseKind::ValueScan(state); - let cmd: RedisCommand = (RedisCommandKind::Zscan, Vec::new(), response).into(); + let cmd: Command = (CommandKind::Zscan, Vec::new(), response).into(); let _ = interfaces::default_send_command(inner, cmd); } } @@ -303,7 +309,7 @@ impl Drop for ZScanResult { } impl Scanner for ZScanResult { - type Page = Vec<(RedisValue, f64)>; + type Page = Vec<(Value, f64)>; fn cursor(&self) -> Option> { if let Some(ref state) = self.scan_state { @@ -325,19 +331,22 @@ impl Scanner for ZScanResult { self.results.take() } - fn create_client(&self) -> RedisClient { - RedisClient { + fn create_client(&self) -> Client { + Client { inner: self.inner.clone(), } } - fn next(self) -> Result<(), RedisError> { + fn next(self) { if !self.can_continue { - return Ok(()); + return; } let mut _self = self; next_zscan_page(&_self.inner, &mut _self.scan_state); - Ok(()) + } + + fn cancel(mut self) { + let _ = self.scan_state.take(); } } diff --git a/src/types/scripts.rs b/src/types/scripts.rs index d85f881e..c836b08c 100644 --- a/src/types/scripts.rs +++ b/src/types/scripts.rs @@ -1,11 +1,12 @@ +#[cfg(feature = "sha-1")] +use crate::util::sha1_hash; use crate::{ - clients::RedisClient, + clients::Client, interfaces::{FunctionInterface, LuaInterface}, - prelude::{FromRedis, RedisError, RedisResult}, - types::{MultipleKeys, MultipleValues, RedisValue}, + prelude::{Error, ErrorKind, FredResult, FromValue}, + types::{MultipleKeys, MultipleValues, Value}, + utils, }; -#[cfg(feature = "sha-1")] -use crate::{prelude::RedisErrorKind, util::sha1_hash}; use bytes_utils::Str; use std::{ cmp::Ordering, 
@@ -17,12 +18,111 @@ use std::{ ops::Deref, }; +/// Flags for the SCRIPT DEBUG command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ScriptDebugFlag { + Yes, + No, + Sync, +} + +impl ScriptDebugFlag { + #[cfg(feature = "i-scripts")] + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ScriptDebugFlag::Yes => "YES", + ScriptDebugFlag::No => "NO", + ScriptDebugFlag::Sync => "SYNC", + }) + } +} + +/// The policy type for the [FUNCTION RESTORE](https://redis.io/commands/function-restore/) command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum FnPolicy { + Flush, + Append, + Replace, +} + +impl Default for FnPolicy { + fn default() -> Self { + FnPolicy::Append + } +} + +impl FnPolicy { + #[cfg(feature = "i-scripts")] + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + FnPolicy::Flush => "FLUSH", + FnPolicy::Append => "APPEND", + FnPolicy::Replace => "REPLACE", + }) + } + + pub(crate) fn from_str(s: &str) -> Result { + Ok(match s { + "flush" | "FLUSH" => FnPolicy::Flush, + "append" | "APPEND" => FnPolicy::Append, + "replace" | "REPLACE" => FnPolicy::Replace, + _ => { + return Err(Error::new( + ErrorKind::InvalidArgument, + "Invalid function restore policy.", + )) + }, + }) + } +} + +// have to implement these for specific types to avoid conflicting with the core Into implementation +impl TryFrom<&str> for FnPolicy { + type Error = Error; + + fn try_from(value: &str) -> Result { + FnPolicy::from_str(value) + } +} + +impl TryFrom<&String> for FnPolicy { + type Error = Error; + + fn try_from(value: &String) -> Result { + FnPolicy::from_str(value.as_str()) + } +} + +impl TryFrom for FnPolicy { + type Error = Error; + + fn try_from(value: String) -> Result { + FnPolicy::from_str(value.as_str()) + } +} + +impl TryFrom for FnPolicy { + type Error = Error; + + fn try_from(value: Str) -> Result { + FnPolicy::from_str(&value) + } +} + +impl TryFrom<&Str> for FnPolicy { + type Error = Error; + + fn try_from(value: &Str) -> 
Result { + FnPolicy::from_str(value) + } +} + /// An interface for caching and running lua scripts. /// /// ```rust no_run -/// # use fred::types::Script; +/// # use fred::types::scripts::Script; /// # use fred::prelude::*; -/// async fn example(client: &RedisClient) -> Result<(), RedisError> { +/// async fn example(client: &Client) -> Result<(), Error> { /// let script = Script::from_lua("return ARGV[1]"); /// assert_eq!(script.sha1(), "098e0f0d1448c0a81dafe820f66d460eb09263da"); /// @@ -95,22 +195,22 @@ impl Script { /// called once before calling [evalsha](Self::evalsha). #[cfg(feature = "sha-1")] #[cfg_attr(docsrs, doc(cfg(feature = "sha-1")))] - pub async fn load(&self, client: &RedisClient) -> RedisResult<()> { + pub async fn load(&self, client: &Client) -> FredResult<()> { if let Some(ref lua) = self.lua { client.script_load_cluster::<(), _>(lua.clone()).await } else { - Err(RedisError::new(RedisErrorKind::Unknown, "Missing lua script contents.")) + Err(Error::new(ErrorKind::Unknown, "Missing lua script contents.")) } } /// Send `EVALSHA` to the server with the provided arguments. - pub async fn evalsha(&self, client: &C, keys: K, args: V) -> RedisResult + pub async fn evalsha(&self, client: &C, keys: K, args: V) -> FredResult where - R: FromRedis, + R: FromValue, C: LuaInterface + Send + Sync, K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { client.evalsha(self.hash.clone(), keys, args).await } @@ -119,12 +219,12 @@ impl Script { /// of `NOSCRIPT` error and try `EVALSHA` again. 
#[cfg(feature = "sha-1")] #[cfg_attr(docsrs, doc(cfg(feature = "sha-1")))] - pub async fn evalsha_with_reload(&self, client: &RedisClient, keys: K, args: V) -> RedisResult + pub async fn evalsha_with_reload(&self, client: &Client, keys: K, args: V) -> FredResult where - R: FromRedis, + R: FromValue, K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { into!(keys); try_into!(args); @@ -139,7 +239,7 @@ impl Script { } } -/// Possible [flags](https://redis.io/docs/manual/programmability/lua-api/) associated with a [Function](crate::types::Function). +/// Possible [flags](https://redis.io/docs/manual/programmability/lua-api/) associated with a [Function](crate::types::scripts::Function). #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] pub enum FunctionFlag { NoWrites, @@ -175,9 +275,7 @@ impl FunctionFlag { } } -/// An individual function within a [Library](crate::types::Library). -/// -/// See the [library documentation](crate::types::Library) for more information. +/// An individual function within a [Library](crate::types::scripts::Library). #[derive(Clone, Debug, Eq, PartialEq)] pub struct Function { pub(crate) name: Str, @@ -228,25 +326,25 @@ impl Function { } /// Send the [fcall](crate::interfaces::FunctionInterface::fcall) command via the provided client. - pub async fn fcall(&self, client: &C, keys: K, args: V) -> RedisResult + pub async fn fcall(&self, client: &C, keys: K, args: V) -> FredResult where - R: FromRedis, + R: FromValue, C: FunctionInterface + Send + Sync, K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { client.fcall(self.name.clone(), keys, args).await } /// Send the [fcall_ro](crate::interfaces::FunctionInterface::fcall_ro) command via the provided client. 
- pub async fn fcall_ro(&self, client: &C, keys: K, args: V) -> RedisResult + pub async fn fcall_ro(&self, client: &C, keys: K, args: V) -> FredResult where - R: FromRedis, + R: FromValue, C: FunctionInterface + Send + Sync, K: Into + Send, V: TryInto + Send, - V::Error: Into + Send, + V::Error: Into + Send, { client.fcall_ro(self.name.clone(), keys, args).await } @@ -255,7 +353,7 @@ impl Function { /// A helper struct for interacting with [libraries and functions](https://redis.io/docs/manual/programmability/functions-intro/). /// /// ```rust no_run -/// # use fred::types::{FunctionFlag, Library}; +/// # use fred::types::scripts::{FunctionFlag, Library}; /// let code = "#!lua name=mylib \n redis.register_function('myfunc', function(keys, args) return \ /// args[1] end)"; /// let library = Library::from_code(client, code).await?; @@ -303,14 +401,14 @@ impl Library { /// Create a new `Library` with the provided code, loading it on all the servers and inspecting the contents via the [FUNCTION LIST](https://redis.io/commands/function-list/) command. /// /// This interface will load the library on the server. - pub async fn from_code(client: &RedisClient, code: S) -> Result + pub async fn from_code(client: &Client, code: S) -> Result where S: Into, { let code = code.into(); let name: Str = client.function_load_cluster(true, code).await?; let functions = client - .function_list::(Some(name.deref()), false) + .function_list::(Some(name.deref()), false) .await? .as_functions(&name)?; @@ -323,13 +421,13 @@ impl Library { /// Create a new `Library` with the associated name, inspecting the library contents via the [FUNCTION LIST](https://redis.io/commands/function-list/) command. /// /// This interface assumes the library is already loaded on the server. 
- pub async fn from_name(client: &RedisClient, name: S) -> Result + pub async fn from_name(client: &Client, name: S) -> Result where S: Into, { let name = name.into(); let functions = client - .function_list::(Some(name.deref()), false) + .function_list::(Some(name.deref()), false) .await? .as_functions(&name)?; diff --git a/src/types/sorted_sets.rs b/src/types/sorted_sets.rs index 0bc20320..629f55b4 100644 --- a/src/types/sorted_sets.rs +++ b/src/types/sorted_sets.rs @@ -1,6 +1,6 @@ use crate::{ - error::{RedisError, RedisErrorKind}, - types::RedisValue, + error::{Error, ErrorKind}, + types::Value, utils, }; use bytes_utils::Str; @@ -84,7 +84,7 @@ impl From> for MultipleWeights { /// Convenience struct for the `ZADD` command to accept 1 or more `(score, value)` arguments. pub struct MultipleZaddValues { - values: Vec<(f64, RedisValue)>, + values: Vec<(f64, Value)>, } impl MultipleZaddValues { @@ -92,7 +92,7 @@ impl MultipleZaddValues { MultipleZaddValues { values: Vec::new() } } - pub fn inner(self) -> Vec<(f64, RedisValue)> { + pub fn inner(self) -> Vec<(f64, Value)> { self.values } @@ -103,10 +103,10 @@ impl MultipleZaddValues { impl TryFrom<(f64, T)> for MultipleZaddValues where - T: TryInto, - T::Error: Into, + T: TryInto, + T::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from((f, d): (f64, T)) -> Result { Ok(MultipleZaddValues { @@ -117,7 +117,7 @@ where impl FromIterator<(f64, T)> for MultipleZaddValues where - T: Into, + T: Into, { fn from_iter>(iter: I) -> Self { MultipleZaddValues { @@ -128,10 +128,10 @@ where impl TryFrom> for MultipleZaddValues where - T: TryInto, - T::Error: Into, + T: TryInto, + T::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(d: Vec<(f64, T)>) -> Result { let mut values = Vec::with_capacity(d.len()); @@ -145,10 +145,10 @@ where impl TryFrom> for MultipleZaddValues where - T: TryInto, - T::Error: Into, + T: TryInto, + T::Error: Into, { - type Error = RedisError; + type Error = 
Error; fn try_from(d: VecDeque<(f64, T)>) -> Result { let mut values = Vec::with_capacity(d.len()); @@ -248,7 +248,7 @@ impl<'a> From<&'a String> for ZRangeBound { } impl TryFrom for ZRangeBound { - type Error = RedisError; + type Error = Error; fn try_from(f: f64) -> Result { let value = if f.is_infinite() && f.is_sign_negative() { @@ -256,10 +256,7 @@ impl TryFrom for ZRangeBound { } else if f.is_infinite() { ZRangeBound::InfiniteScore } else if f.is_nan() { - return Err(RedisError::new( - RedisErrorKind::Unknown, - "Cannot use NaN as zrange field.", - )); + return Err(Error::new(ErrorKind::Unknown, "Cannot use NaN as zrange field.")); } else { ZRangeBound::Score(f) }; @@ -289,26 +286,26 @@ pub struct ZRange { } impl ZRange { - pub(crate) fn into_value(self) -> Result { + pub(crate) fn into_value(self) -> Result { let value = if self.kind == ZRangeKind::Exclusive { match self.range { ZRangeBound::Index(i) => format!("({}", i).into(), ZRangeBound::Score(f) => utils::f64_to_zrange_bound(f, &self.kind)?.into(), ZRangeBound::Lex(s) => utils::check_lex_str(s, &self.kind).into(), - ZRangeBound::InfiniteLex => RedisValue::from_static_str("+"), - ZRangeBound::NegInfinityLex => RedisValue::from_static_str("-"), - ZRangeBound::InfiniteScore => RedisValue::from_static_str("+inf"), - ZRangeBound::NegInfiniteScore => RedisValue::from_static_str("-inf"), + ZRangeBound::InfiniteLex => Value::from_static_str("+"), + ZRangeBound::NegInfinityLex => Value::from_static_str("-"), + ZRangeBound::InfiniteScore => Value::from_static_str("+inf"), + ZRangeBound::NegInfiniteScore => Value::from_static_str("-inf"), } } else { match self.range { ZRangeBound::Index(i) => i.into(), ZRangeBound::Score(f) => f.try_into()?, ZRangeBound::Lex(s) => utils::check_lex_str(s, &self.kind).into(), - ZRangeBound::InfiniteLex => RedisValue::from_static_str("+"), - ZRangeBound::NegInfinityLex => RedisValue::from_static_str("-"), - ZRangeBound::InfiniteScore => RedisValue::from_static_str("+inf"), - 
ZRangeBound::NegInfiniteScore => RedisValue::from_static_str("-inf"), + ZRangeBound::InfiniteLex => Value::from_static_str("+"), + ZRangeBound::NegInfinityLex => Value::from_static_str("-"), + ZRangeBound::InfiniteScore => Value::from_static_str("+inf"), + ZRangeBound::NegInfiniteScore => Value::from_static_str("-inf"), } }; @@ -353,7 +350,7 @@ impl<'a> From<&'a String> for ZRange { } impl TryFrom for ZRange { - type Error = RedisError; + type Error = Error; fn try_from(f: f64) -> Result { Ok(ZRange { @@ -368,3 +365,21 @@ impl<'a> From<&'a ZRange> for ZRange { range.clone() } } + +/// Aggregate options for the [zinterstore](https://redis.io/commands/zinterstore) (and related) commands. +pub enum AggregateOptions { + Sum, + Min, + Max, +} + +impl AggregateOptions { + #[cfg(feature = "i-sorted-sets")] + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + AggregateOptions::Sum => "SUM", + AggregateOptions::Min => "MIN", + AggregateOptions::Max => "MAX", + }) + } +} diff --git a/src/types/streams.rs b/src/types/streams.rs index f464495e..c3397314 100644 --- a/src/types/streams.rs +++ b/src/types/streams.rs @@ -1,7 +1,7 @@ use crate::{ commands::{MAXLEN, MINID}, - error::{RedisError, RedisErrorKind}, - types::{LimitCount, RedisKey, RedisValue, StringOrNumber}, + error::{Error, ErrorKind}, + types::{Key, LimitCount, StringOrNumber, Value}, utils, }; use bytes_utils::Str; @@ -27,18 +27,13 @@ impl XCapTrim { } impl<'a> TryFrom<&'a str> for XCapTrim { - type Error = RedisError; + type Error = Error; fn try_from(s: &'a str) -> Result { Ok(match s { "=" => XCapTrim::Exact, "~" => XCapTrim::AlmostExact, - _ => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, - "Invalid XADD trim value.", - )) - }, + _ => return Err(Error::new(ErrorKind::InvalidArgument, "Invalid XADD trim value.")), }) } } @@ -46,7 +41,7 @@ impl<'a> TryFrom<&'a str> for XCapTrim { /// One or more ordered key-value pairs, typically used as an argument for `XADD`. 
#[derive(Clone, Debug, Eq, PartialEq)] pub struct MultipleOrderedPairs { - values: Vec<(RedisKey, RedisValue)>, + values: Vec<(Key, Value)>, } impl MultipleOrderedPairs { @@ -54,7 +49,7 @@ impl MultipleOrderedPairs { self.values.len() } - pub fn inner(self) -> Vec<(RedisKey, RedisValue)> { + pub fn inner(self) -> Vec<(Key, Value)> { self.values } } @@ -67,11 +62,11 @@ impl From<()> for MultipleOrderedPairs { impl TryFrom<(K, V)> for MultipleOrderedPairs where - K: Into, - V: TryInto, - V::Error: Into, + K: Into, + V: TryInto, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from((key, value): (K, V)) -> Result { Ok(MultipleOrderedPairs { @@ -82,54 +77,54 @@ where impl TryFrom> for MultipleOrderedPairs where - K: Into, - V: TryInto, - V::Error: Into, + K: Into, + V: TryInto, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(values: Vec<(K, V)>) -> Result { Ok(MultipleOrderedPairs { values: values .into_iter() .map(|(key, value)| Ok((key.into(), to!(value)?))) - .collect::, RedisError>>()?, + .collect::, Error>>()?, }) } } impl TryFrom> for MultipleOrderedPairs where - K: Into, - V: TryInto, - V::Error: Into, + K: Into, + V: TryInto, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(values: VecDeque<(K, V)>) -> Result { Ok(MultipleOrderedPairs { values: values .into_iter() .map(|(key, value)| Ok((key.into(), to!(value)?))) - .collect::, RedisError>>()?, + .collect::, Error>>()?, }) } } impl TryFrom> for MultipleOrderedPairs where - K: Into, - V: TryInto, - V::Error: Into, + K: Into, + V: TryInto, + V::Error: Into, { - type Error = RedisError; + type Error = Error; fn try_from(values: HashMap) -> Result { Ok(MultipleOrderedPairs { values: values .into_iter() .map(|(key, value)| Ok((key.into(), to!(value)?))) - .collect::, RedisError>>()?, + .collect::, Error>>()?, }) } } @@ -200,18 +195,13 @@ impl XCapKind { } impl<'a> TryFrom<&'a str> for XCapKind { - type Error = RedisError; + 
type Error = Error; fn try_from(value: &'a str) -> Result { Ok(match value { "MAXLEN" => XCapKind::MaxLen, "MINID" => XCapKind::MinID, - _ => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, - "Expected MAXLEN or MINID,", - )) - }, + _ => return Err(Error::new(ErrorKind::InvalidArgument, "Expected MAXLEN or MINID,")), }) } } @@ -239,12 +229,12 @@ impl From> for XCap { impl TryFrom<(K, T, S, Option)> for XCap where K: TryInto, - K::Error: Into, + K::Error: Into, T: TryInto, - T::Error: Into, + T::Error: Into, S: Into, { - type Error = RedisError; + type Error = Error; fn try_from((kind, trim, threshold, limit): (K, T, S, Option)) -> Result { let (kind, trim) = (to!(kind)?, to!(trim)?); @@ -257,12 +247,12 @@ where impl TryFrom<(K, T, S)> for XCap where K: TryInto, - K::Error: Into, + K::Error: Into, T: TryInto, - T::Error: Into, + T::Error: Into, S: Into, { - type Error = RedisError; + type Error = Error; fn try_from((kind, trim, threshold): (K, T, S)) -> Result { let (kind, trim) = (to!(kind)?, to!(trim)?); @@ -275,10 +265,10 @@ where impl TryFrom<(K, S)> for XCap where K: TryInto, - K::Error: Into, + K::Error: Into, S: Into, { - type Error = RedisError; + type Error = Error; fn try_from((kind, threshold): (K, S)) -> Result { let kind = to!(kind)?; @@ -370,7 +360,7 @@ pub struct XPendingArgs { } impl XPendingArgs { - pub(crate) fn into_parts(self) -> Result, XID, XID, u64, Option)>, RedisError> { + pub(crate) fn into_parts(self) -> Result, XID, XID, u64, Option)>, Error> { let is_empty = self.idle.is_none() && self.start.is_none() && self.end.is_none() @@ -383,8 +373,8 @@ impl XPendingArgs { let start = match self.start { Some(s) => s, None => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, + return Err(Error::new( + ErrorKind::InvalidArgument, "The `start` argument is required in this context.", )) }, @@ -392,8 +382,8 @@ impl XPendingArgs { let end = match self.end { Some(s) => s, None => { - return Err(RedisError::new( - 
RedisErrorKind::InvalidArgument, + return Err(Error::new( + ErrorKind::InvalidArgument, "The `end` argument is required in this context.", )) }, @@ -401,8 +391,8 @@ impl XPendingArgs { let count = match self.count { Some(s) => s, None => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, + return Err(Error::new( + ErrorKind::InvalidArgument, "The `count` argument is required in this context.", )) }, @@ -493,18 +483,18 @@ where /// A generic helper type describing the ID and associated map for each record in a stream. /// -/// See the [XReadResponse](crate::types::XReadResponse) type for more information. +/// See the [XReadResponse](crate::types::streams::XReadResponse) type for more information. pub type XReadValue = (I, HashMap); /// A generic helper type describing the top level response from `XREAD` or `XREADGROUP`. /// /// See the [xread](crate::interfaces::StreamsInterface::xread) documentation for more information. /// /// The inner type declarations refer to the following: -/// * K1 - The type of the outer Redis key for the stream. Usually a `String` or `RedisKey`. +/// * K1 - The type of the outer key for the stream. Usually a `String` or `Key`. /// * I - The type of the ID for a stream record ("abc-123"). This is usually a `String`. /// * K2 - The type of key in the map associated with each stream record. /// * V - The type of value in the map associated with each stream record. /// /// To support heterogeneous values in the map describing each stream element it is recommended to declare the last -/// type as `RedisValue` and [convert](crate::types::RedisValue::convert) as needed. +/// type as `Value` and [convert](crate::types::Value::convert) as needed. 
pub type XReadResponse = HashMap>>; diff --git a/src/types/timeseries.rs b/src/types/timeseries.rs index 0029c063..a59b035e 100644 --- a/src/types/timeseries.rs +++ b/src/types/timeseries.rs @@ -1,6 +1,6 @@ use crate::{ - error::{RedisError, RedisErrorKind}, - types::RedisValue, + error::{Error, ErrorKind}, + types::Value, utils, }; use bytes_utils::Str; @@ -65,14 +65,14 @@ impl Default for Timestamp { } impl Timestamp { - pub(crate) fn to_value(&self) -> RedisValue { + pub(crate) fn to_value(&self) -> Value { match *self { - Timestamp::Now => RedisValue::String(utils::static_str("*")), - Timestamp::Custom(v) => RedisValue::Integer(v), + Timestamp::Now => Value::String(utils::static_str("*")), + Timestamp::Custom(v) => Value::Integer(v), } } - pub(crate) fn from_str(value: &str) -> Result { + pub(crate) fn from_str(value: &str) -> Result { match value { "*" => Ok(Timestamp::Now), _ => Ok(Timestamp::Custom(value.parse::()?)), @@ -87,7 +87,7 @@ impl From for Timestamp { } impl TryFrom<&str> for Timestamp { - type Error = RedisError; + type Error = Error; fn try_from(value: &str) -> Result { Self::from_str(value) @@ -95,7 +95,7 @@ impl TryFrom<&str> for Timestamp { } impl TryFrom for Timestamp { - type Error = RedisError; + type Error = Error; fn try_from(value: Str) -> Result { Self::from_str(&value) @@ -103,7 +103,7 @@ impl TryFrom for Timestamp { } impl TryFrom for Timestamp { - type Error = RedisError; + type Error = Error; fn try_from(value: String) -> Result { Self::from_str(&value) @@ -205,7 +205,7 @@ pub enum GetTimestamp { } impl GetTimestamp { - pub(crate) fn to_value(&self) -> RedisValue { + pub(crate) fn to_value(&self) -> Value { match *self { GetTimestamp::Earliest => static_val!("-"), GetTimestamp::Latest => static_val!("+"), @@ -215,7 +215,7 @@ impl GetTimestamp { } impl TryFrom<&str> for GetTimestamp { - type Error = RedisError; + type Error = Error; fn try_from(value: &str) -> Result { Ok(match value { @@ -316,19 +316,14 @@ pub enum BucketTimestamp { 
} impl TryFrom<&str> for BucketTimestamp { - type Error = RedisError; + type Error = Error; fn try_from(value: &str) -> Result { Ok(match value { "-" | "start" => BucketTimestamp::Start, "+" | "end" => BucketTimestamp::End, "~" | "mid" => BucketTimestamp::Mid, - _ => { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, - "Invalid bucket timestamp.", - )) - }, + _ => return Err(Error::new(ErrorKind::InvalidArgument, "Invalid bucket timestamp.")), }) } } @@ -345,19 +340,19 @@ impl BucketTimestamp { /// Shorthand for the result of commands such as `MGET`, `MRANGE`, etc. /// -/// * **K** - The key type, usually a `RedisKey`, `Str`, or `String`. +/// * **K** - The key type, usually a `Key`, `Str`, or `String`. /// * **Lk** - The label key type, usually a `Str` or `String`. /// * **Lv** - The label value type, often some kind of string type. /// -/// The fastest/cheapest option is usually `TimeseriesValues`. +/// The fastest/cheapest option is usually `TimeseriesValues`. /// /// ```rust /// # use fred::prelude::*; /// # use tokio::time::sleep; /// # use std::time::Duration; /// # use bytes_utils::Str; -/// # use fred::types::{RespVersion, GetLabels, Resp2TimeSeriesValues}; -/// async fn example(client: &RedisClient) -> Result<(), RedisError> { +/// # use fred::types::{RespVersion, timeseries::{GetLabels, Resp2TimeSeriesValues}}; +/// async fn example(client: &Client) -> Result<(), Error> { /// assert_eq!(client.protocol_version(), RespVersion::RESP2); /// /// client @@ -376,7 +371,7 @@ impl BucketTimestamp { /// .ts_add("bar", "*", 4.4, None, None, None, None, ("a", "b")) /// .await?; /// -/// let ranges: Resp2TimeSeriesValues = client +/// let ranges: Resp2TimeSeriesValues = client /// .ts_mrange( /// "-", /// "+", @@ -400,11 +395,11 @@ impl BucketTimestamp { /// } /// ``` /// -/// See [Resp3TimeSeriesValues](crate::types::Resp3TimeSeriesValues) for the RESP3 equivalent. 
+/// See [Resp3TimeSeriesValues](crate::types::timeseries::Resp3TimeSeriesValues) for the RESP3 equivalent. #[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] pub type Resp2TimeSeriesValues = Vec<(K, Vec<(Lk, Lv)>, Vec<(i64, f64)>)>; -/// The RESP3 equivalent of [Resp2TimeSeriesValues](crate::types::Resp2TimeSeriesValues). +/// The RESP3 equivalent of [Resp2TimeSeriesValues](crate::types::timeseries::Resp2TimeSeriesValues). /// /// The timeseries interface uses slightly different type signatures in RESP3 mode. /// @@ -413,8 +408,8 @@ pub type Resp2TimeSeriesValues = Vec<(K, Vec<(Lk, Lv)>, Vec<(i64, f64 /// # use tokio::time::sleep; /// # use std::time::Duration; /// # use bytes_utils::Str; -/// # use fred::types::{RespVersion, GetLabels, Resp3TimeSeriesValues}; -/// async fn example(client: &RedisClient) -> Result<(), RedisError> { +/// # use fred::types::{RespVersion, timeseries::{GetLabels, Resp3TimeSeriesValues}}; +/// async fn example(client: &Client) -> Result<(), Error> { /// assert_eq!(client.protocol_version(), RespVersion::RESP3); /// /// client @@ -433,7 +428,7 @@ pub type Resp2TimeSeriesValues = Vec<(K, Vec<(Lk, Lv)>, Vec<(i64, f64 /// .ts_add("bar", "*", 4.4, None, None, None, None, ("a", "b")) /// .await?; /// -/// let ranges: Resp3TimeSeriesValues = client +/// let ranges: Resp3TimeSeriesValues = client /// .ts_mget(false, Some(GetLabels::WithLabels), ["a=b"]) /// .await?; /// diff --git a/src/utils.rs b/src/utils.rs index 221e18dd..9fc7e9f9 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,17 +1,17 @@ use crate::{ - error::{RedisError, RedisErrorKind}, + error::{Error, ErrorKind}, interfaces::ClientLike, - modules::inner::{CommandSender, RedisClientInner}, + modules::inner::{ClientInner, CommandSender}, + prelude::{Blocking, Server}, protocol::{ - command::{RedisCommand, RedisCommandKind}, + command::{Command, CommandKind}, responders::ResponseKind, utils as protocol_utils, }, runtime::{ broadcast_channel, + channel, oneshot_channel, - sleep, 
- unbounded_channel, AtomicBool, AtomicUsize, BroadcastSender, @@ -19,17 +19,12 @@ use crate::{ RefSwap, RwLock, }, - types::*, + types::{ClientUnblockFlag, *}, }; use bytes::Bytes; use bytes_utils::Str; use float_cmp::approx_eq; -use futures::{ - future::{select, Either}, - pin_mut, - Future, - TryFutureExt, -}; +use futures::{Future, TryFutureExt}; use rand::{self, distributions::Alphanumeric, Rng}; use redis_protocol::resp3::types::BytesFrame as Resp3Frame; use std::{collections::HashMap, convert::TryInto, f64, sync::atomic::Ordering, time::Duration}; @@ -46,6 +41,10 @@ use crate::protocol::tls::{TlsConfig, TlsConnector}; use crate::runtime::Mutex; #[cfg(any(feature = "full-tracing", feature = "partial-tracing"))] use crate::trace; +#[cfg(feature = "i-scripts")] +use crate::types::scripts::{Function, FunctionFlag}; +#[cfg(feature = "i-sorted-sets")] +use crate::types::sorted_sets::ZRangeKind; #[cfg(feature = "transactions")] use std::mem; #[cfg(feature = "unix-sockets")] @@ -54,8 +53,10 @@ use std::path::{Path, PathBuf}; use tracing_futures::Instrument; const REDIS_TLS_SCHEME: &str = "rediss"; -const REDIS_CLUSTER_SCHEME_SUFFIX: &str = "-cluster"; -const REDIS_SENTINEL_SCHEME_SUFFIX: &str = "-sentinel"; +const VALKEY_TLS_SCHEME: &str = "valkeys"; +const CLUSTER_SCHEME_SUFFIX: &str = "-cluster"; +const SENTINEL_SCHEME_SUFFIX: &str = "-sentinel"; +const UNIX_SCHEME_SUFFIX: &str = "+unix"; const SENTINEL_NAME_QUERY: &str = "sentinelServiceName"; const CLUSTER_NODE_QUERY: &str = "node"; #[cfg(feature = "sentinel-auth")] @@ -89,8 +90,8 @@ pub fn f64_opt_eq(lhs: &Option, rhs: &Option) -> bool { } } -/// Convert a redis string to an `f64`, supporting "+inf" and "-inf". -pub fn redis_string_to_f64(s: &str) -> Result { +/// Convert a string to an `f64`, supporting "+inf" and "-inf". 
+pub fn string_to_f64(s: &str) -> Result { // this is changing in newer versions of redis to lose the "+" prefix if s == "+inf" || s == "inf" { Ok(f64::INFINITY) @@ -98,23 +99,23 @@ pub fn redis_string_to_f64(s: &str) -> Result { Ok(f64::NEG_INFINITY) } else { s.parse::().map_err(|_| { - RedisError::new( - RedisErrorKind::Unknown, + Error::new( + ErrorKind::Unknown, format!("Could not convert {} to floating point value.", s), ) }) } } -/// Convert an `f64` to a redis string, supporting "+inf" and "-inf". -pub fn f64_to_redis_string(d: f64) -> Result { +/// Convert an `f64` to a string, supporting "+inf" and "-inf". +pub fn f64_to_string(d: f64) -> Result { if d.is_infinite() && d.is_sign_negative() { - Ok(RedisValue::from_static_str("-inf")) + Ok(Value::from_static_str("-inf")) } else if d.is_infinite() { - Ok(RedisValue::from_static_str("+inf")) + Ok(Value::from_static_str("+inf")) } else if d.is_nan() { - Err(RedisError::new( - RedisErrorKind::InvalidArgument, + Err(Error::new( + ErrorKind::InvalidArgument, "Cannot convert NaN to redis value.", )) } else { @@ -123,14 +124,14 @@ pub fn f64_to_redis_string(d: f64) -> Result { } #[cfg(feature = "i-sorted-sets")] -pub fn f64_to_zrange_bound(d: f64, kind: &ZRangeKind) -> Result { +pub fn f64_to_zrange_bound(d: f64, kind: &ZRangeKind) -> Result { if d.is_infinite() && d.is_sign_negative() { Ok("-inf".into()) } else if d.is_infinite() { Ok("+inf".into()) } else if d.is_nan() { - Err(RedisError::new( - RedisErrorKind::InvalidArgument, + Err(Error::new( + ErrorKind::InvalidArgument, "Cannot convert NaN to redis value.", )) } else { @@ -158,9 +159,9 @@ pub fn random_string(len: usize) -> String { } #[cfg(feature = "i-memory")] -pub fn convert_or_default(value: RedisValue) -> R +pub fn convert_or_default(value: Value) -> R where - R: FromRedis + Default, + R: FromValue + Default, { value.convert().ok().unwrap_or_default() } @@ -170,26 +171,6 @@ pub fn random_u64(max: u64) -> u64 { rand::thread_rng().gen_range(0 .. 
max) } -pub fn set_client_state(state: &RwLock, new_state: ClientState) { - let mut state_guard = state.write(); - *state_guard = new_state; -} - -pub fn check_and_set_client_state( - state: &RwLock, - expected: ClientState, - new_state: ClientState, -) -> bool { - let mut state_guard = state.write(); - - if *state_guard != expected { - false - } else { - *state_guard = new_state; - true - } -} - pub fn read_bool_atomic(val: &AtomicBool) -> bool { val.load(Ordering::Acquire) } @@ -248,14 +229,14 @@ pub fn check_lex_str(val: String, kind: &ZRangeKind) -> String { /// Parse the response from `FUNCTION LIST`. #[cfg(feature = "i-scripts")] -fn parse_functions(value: &RedisValue) -> Result, RedisError> { - if let RedisValue::Array(functions) = value { +fn parse_functions(value: &Value) -> Result, Error> { + if let Value::Array(functions) = value { let mut out = Vec::with_capacity(functions.len()); for function_block in functions.iter() { - let functions: HashMap = function_block.clone().convert()?; + let functions: HashMap = function_block.clone().convert()?; let name = match functions.get("name").and_then(|n| n.as_bytes_str()) { Some(name) => name, - None => return Err(RedisError::new_parse("Missing function name.")), + None => return Err(Error::new_parse("Missing function name.")), }; let flags: Vec = functions .get("flags") @@ -273,16 +254,16 @@ fn parse_functions(value: &RedisValue) -> Result, RedisError> { Ok(out) } else { - Err(RedisError::new_parse("Invalid functions block.")) + Err(Error::new_parse("Invalid functions block.")) } } /// Check and parse the response to `FUNCTION LIST`. 
#[cfg(feature = "i-scripts")] -pub fn value_to_functions(value: &RedisValue, name: &str) -> Result, RedisError> { - if let RedisValue::Array(ref libraries) = value { +pub fn value_to_functions(value: &Value, name: &str) -> Result, Error> { + if let Value::Array(ref libraries) = value { for library in libraries.iter() { - let properties: HashMap = library.clone().convert()?; + let properties: HashMap = library.clone().convert()?; let should_parse = properties .get("library_name") .and_then(|v| v.as_str()) @@ -296,43 +277,36 @@ pub fn value_to_functions(value: &RedisValue, name: &str) -> Result(ft: Fut, timeout: Duration) -> Result +pub async fn timeout(ft: Fut, timeout: Duration) -> Result where - E: Into, + E: Into, Fut: Future>, { if !timeout.is_zero() { - let sleep_ft = sleep(timeout); - pin_mut!(sleep_ft); - pin_mut!(ft); - - trace!("Using timeout: {:?}", timeout); - match select(ft, sleep_ft).await { - Either::Left((lhs, _)) => lhs.map_err(|e| e.into()), - Either::Right((_, _)) => Err(RedisError::new(RedisErrorKind::Timeout, "Request timed out.")), - } + tokio::time::timeout(timeout, ft) + .await + .map_err(|_| Error::new(ErrorKind::Timeout, "Request timed out.")) + .and_then(|r| r.map_err(|e| e.into())) } else { ft.await.map_err(|e| e.into()) } } /// Disconnect any state shared with the last router task spawned by the client. -pub fn reset_router_task(inner: &RefCount) { +pub fn reset_router_task(inner: &RefCount) { let _guard = inner._lock.lock(); if !inner.has_command_rx() { _trace!(inner, "Resetting command channel before connecting."); // another connection task is running. this will let the command channel drain, then it'll drop everything on // the old connection/router interface. 
- let (tx, rx) = unbounded_channel(); - #[cfg(feature = "glommio")] - let tx = tx.into(); + let (tx, rx) = channel(inner.connection.max_command_buffer_len); let old_command_tx = inner.swap_command_tx(tx); inner.store_command_rx(rx, true); @@ -341,12 +315,12 @@ pub fn reset_router_task(inner: &RefCount) { } /// Whether the router should check and interrupt the blocked command. -async fn should_enforce_blocking_policy(inner: &RefCount, command: &RedisCommand) -> bool { +fn should_enforce_blocking_policy(inner: &RefCount, command: &Command) -> bool { if command.kind.closes_connection() { return false; } if matches!(inner.config.blocking, Blocking::Error | Blocking::Interrupt) { - inner.backchannel.write().await.is_blocked() + inner.backchannel.is_blocked() } else { false } @@ -354,30 +328,24 @@ async fn should_enforce_blocking_policy(inner: &RefCount, comm /// Interrupt the currently blocked connection (if found) with the provided flag. pub async fn interrupt_blocked_connection( - inner: &RefCount, + inner: &RefCount, flag: ClientUnblockFlag, -) -> Result<(), RedisError> { +) -> Result<(), Error> { let connection_id = { - let backchannel = inner.backchannel.write().await; - let server = match backchannel.blocked_server() { + let server = match inner.backchannel.blocked_server() { Some(server) => server, - None => return Err(RedisError::new(RedisErrorKind::Unknown, "Connection is not blocked.")), + None => return Err(Error::new(ErrorKind::Unknown, "Connection is not blocked.")), }; - let id = match backchannel.connection_id(&server) { + let id = match inner.backchannel.connection_id(&server) { Some(id) => id, - None => { - return Err(RedisError::new( - RedisErrorKind::Unknown, - "Failed to read connection ID.", - )) - }, + None => return Err(Error::new(ErrorKind::Unknown, "Failed to read connection ID.")), }; _debug!(inner, "Sending CLIENT UNBLOCK to {}, ID: {}", server, id); id }; - let command = RedisCommand::new(RedisCommandKind::ClientUnblock, vec![ + let command = 
Command::new(CommandKind::ClientUnblock, vec![ connection_id.into(), flag.to_str().into(), ]); @@ -387,8 +355,8 @@ pub async fn interrupt_blocked_connection( /// Check the status of the connection (usually before sending a command) to determine whether the connection should /// be unblocked automatically. -async fn check_blocking_policy(inner: &RefCount, command: &RedisCommand) -> Result<(), RedisError> { - if should_enforce_blocking_policy(inner, command).await { +async fn check_blocking_policy(inner: &RefCount, command: &Command) -> Result<(), Error> { + if should_enforce_blocking_policy(inner, command) { _debug!( inner, "Checking to enforce blocking policy for {}", @@ -396,8 +364,8 @@ async fn check_blocking_policy(inner: &RefCount, command: &Red ); if inner.config.blocking == Blocking::Error { - return Err(RedisError::new( - RedisErrorKind::InvalidCommand, + return Err(Error::new( + ErrorKind::InvalidCommand, "Error sending command while connection is blocked.", )); } else if inner.config.blocking == Blocking::Interrupt { @@ -411,7 +379,7 @@ async fn check_blocking_policy(inner: &RefCount, command: &Red } /// Prepare the command options, returning the timeout duration to apply. -pub fn prepare_command(client: &C, command: &mut RedisCommand) -> Duration { +pub fn prepare_command(client: &C, command: &mut Command) -> Duration { client.change_command(command); command.inherit_options(client.inner()); command @@ -420,14 +388,14 @@ pub fn prepare_command(client: &C, command: &mut RedisCommand) -> } /// Send a command to the server using the default response handler. 
-pub async fn basic_request_response(client: &C, func: F) -> Result +pub async fn basic_request_response(client: &C, func: F) -> Result where C: ClientLike, - R: Into, - F: FnOnce() -> Result, + R: Into, + F: FnOnce() -> Result, { let inner = client.inner(); - let mut command: RedisCommand = func()?.into(); + let mut command: Command = func()?.into(); let (tx, rx) = oneshot_channel(); command.response = ResponseKind::Respond(Some(tx)); @@ -436,24 +404,32 @@ where check_blocking_policy(inner, &command).await?; client.send_command(command)?; - timeout(rx, timeout_dur) - .and_then(|r| async { r }) - .map_err(move |error| { + if timeout_dur.is_zero() { + rx.map_err(move |error| { set_bool_atomic(&timed_out, true); - error + Error::from(error) }) - .await + .await? + } else { + timeout(rx, timeout_dur) + .and_then(|r| async { r }) + .map_err(move |error| { + set_bool_atomic(&timed_out, true); + error + }) + .await + } } /// Send a command to the server, with tracing. #[cfg(any(feature = "full-tracing", feature = "partial-tracing"))] #[allow(clippy::needless_borrows_for_generic_args)] // despite what clippy says, this^ actually matters for tracing `record` calls (at least it seems where `V: Copy`) -pub async fn request_response(client: &C, func: F) -> Result +pub async fn request_response(client: &C, func: F) -> Result where C: ClientLike, - R: Into, - F: FnOnce() -> Result, + R: Into, + F: FnOnce() -> Result, { let inner = client.inner(); if !inner.should_trace() { @@ -469,7 +445,7 @@ where let _guard = args_span.enter(); let (tx, rx) = oneshot_channel(); - let mut command: RedisCommand = func()?.into(); + let mut command: Command = func()?.into(); command.response = ResponseKind::Respond(Some(tx)); let req_size = protocol_utils::args_size(command.args()); @@ -495,26 +471,27 @@ where check_blocking_policy(inner, &command).await?; client.send_command(command)?; - timeout(rx, timeout_dur) - .and_then(|r| async { r }) - .map_err(move |error| { - set_bool_atomic(&timed_out, 
true); - error - }) - .and_then(|frame| async move { - trace::record_response_size(&end_cmd_span, &frame); - Ok::<_, RedisError>(frame) - }) - .instrument(cmd_span) - .await + let ft = async { rx.instrument(cmd_span).await.map_err(|e| e.into()).and_then(|r| r) }; + let result = if timeout_dur.is_zero() { + ft.await + } else { + timeout(ft, timeout_dur).await + }; + + if let Ok(ref frame) = result { + trace::record_response_size(&end_cmd_span, frame); + } else { + set_bool_atomic(&timed_out, true); + } + result } #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] -pub async fn request_response(client: &C, func: F) -> Result +pub async fn request_response(client: &C, func: F) -> Result where C: ClientLike, - R: Into, - F: FnOnce() -> Result, + R: Into, + F: FnOnce() -> Result, { basic_request_response(client, func).await } @@ -523,19 +500,18 @@ where /// /// A new connection may be created. pub async fn backchannel_request_response( - inner: &RefCount, - command: RedisCommand, + inner: &RefCount, + command: Command, use_blocked: bool, -) -> Result { - let mut backchannel = inner.backchannel.write().await; - let server = backchannel.find_server(inner, &command, use_blocked)?; - backchannel.request_response(inner, &server, command).await +) -> Result { + let server = inner.backchannel.find_server(inner, &command, use_blocked).await?; + inner.backchannel.request_response(inner, &server, command).await } /// Check for a scan pattern without a hash tag, or with a wildcard in the hash tag. /// /// These patterns will result in scanning a random node if used against a clustered redis. 
-pub fn clustered_scan_pattern_has_hash_tag(inner: &RefCount, pattern: &str) -> bool { +pub fn clustered_scan_pattern_has_hash_tag(inner: &RefCount, pattern: &str) -> bool { let (mut i, mut j, mut has_wildcard) = (None, None, false); for (idx, c) in pattern.chars().enumerate() { if c == '{' && i.is_none() { @@ -565,18 +541,18 @@ pub fn clustered_scan_pattern_has_hash_tag(inner: &RefCount, p } /// A generic TryInto wrapper to work with the Infallible error type in the blanket From implementation. -pub fn try_into(val: S) -> Result +pub fn try_into(val: S) -> Result where S: TryInto, - S::Error: Into, + S::Error: Into, { val.try_into().map_err(|e| e.into()) } -pub fn try_into_vec(values: Vec) -> Result, RedisError> +pub fn try_into_vec(values: Vec) -> Result, Error> where - S: TryInto, - S::Error: Into, + S: TryInto, + S::Error: Into, { let mut out = Vec::with_capacity(values.len()); for value in values.into_iter() { @@ -594,13 +570,13 @@ pub fn add_jitter(delay: u64, jitter: u32) -> u64 { } } -pub fn into_redis_map(mut iter: I) -> Result, RedisError> +pub fn into_map(mut iter: I) -> Result, Error> where I: Iterator, - K: TryInto, - K::Error: Into, - V: TryInto, - V::Error: Into, + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, { let (lower, upper) = iter.size_hint(); let capacity = if let Some(upper) = upper { upper } else { lower }; @@ -612,19 +588,19 @@ where Ok(out) } -pub fn flatten_nested_array_values(value: RedisValue, depth: usize) -> RedisValue { +pub fn flatten_nested_array_values(value: Value, depth: usize) -> Value { if depth == 0 { return value; } match value { - RedisValue::Array(values) => { + Value::Array(values) => { let inner_size = values.iter().fold(0, |s, v| s + v.array_len().unwrap_or(1)); let mut out = Vec::with_capacity(inner_size); for value in values.into_iter() { match value { - RedisValue::Array(inner) => { + Value::Array(inner) => { for value in inner.into_iter() { out.push(flatten_nested_array_values(value, depth - 1)); } 
@@ -632,9 +608,9 @@ pub fn flatten_nested_array_values(value: RedisValue, depth: usize) -> RedisValu _ => out.push(value), } } - RedisValue::Array(out) + Value::Array(out) }, - RedisValue::Map(values) => { + Value::Map(values) => { let mut out = HashMap::with_capacity(values.len()); for (key, value) in values.inner().into_iter() { @@ -646,13 +622,13 @@ pub fn flatten_nested_array_values(value: RedisValue, depth: usize) -> RedisValu out.insert(key, value); } - RedisValue::Map(RedisMap { inner: out }) + Value::Map(Map { inner: out }) }, _ => value, } } -pub fn is_maybe_array_map(arr: &[RedisValue]) -> bool { +pub fn is_maybe_array_map(arr: &[Value]) -> bool { if !arr.is_empty() && arr.len() % 2 == 0 { arr.chunks(2).all(|chunk| !chunk[0].is_aggregate_type()) } else { @@ -680,7 +656,7 @@ pub fn check_tls_features() { feature = "enable-native-tls", not(any(feature = "enable-rustls", feature = "enable-rustls-ring")) ))] -pub fn tls_config_from_url(tls: bool) -> Result, RedisError> { +pub fn tls_config_from_url(tls: bool) -> Result, Error> { if tls { TlsConnector::default_native_tls().map(|c| Some(c.into())) } else { @@ -692,7 +668,7 @@ pub fn tls_config_from_url(tls: bool) -> Result, RedisError> { any(feature = "enable-rustls", feature = "enable-rustls-ring"), not(feature = "enable-native-tls") ))] -pub fn tls_config_from_url(tls: bool) -> Result, RedisError> { +pub fn tls_config_from_url(tls: bool) -> Result, Error> { if tls { TlsConnector::default_rustls().map(|c| Some(c.into())) } else { @@ -704,7 +680,7 @@ pub fn tls_config_from_url(tls: bool) -> Result, RedisError> { feature = "enable-native-tls", any(feature = "enable-rustls", feature = "enable-rustls-ring") ))] -pub fn tls_config_from_url(tls: bool) -> Result, RedisError> { +pub fn tls_config_from_url(tls: bool) -> Result, Error> { // default to native-tls when both are enabled if tls { TlsConnector::default_native_tls().map(|c| Some(c.into())) @@ -719,28 +695,33 @@ pub fn swap_new_broadcast_channel(old: &RefSwap 
bool { - url.scheme().starts_with(REDIS_TLS_SCHEME) + let scheme = url.scheme(); + scheme.starts_with(REDIS_TLS_SCHEME) || scheme.starts_with(VALKEY_TLS_SCHEME) } pub fn url_is_clustered(url: &Url) -> bool { - url.scheme().ends_with(REDIS_CLUSTER_SCHEME_SUFFIX) + url.scheme().ends_with(CLUSTER_SCHEME_SUFFIX) } pub fn url_is_sentinel(url: &Url) -> bool { - url.scheme().ends_with(REDIS_SENTINEL_SCHEME_SUFFIX) + url.scheme().ends_with(SENTINEL_SCHEME_SUFFIX) } -pub fn parse_url(url: &str, default_port: Option) -> Result<(Url, String, u16, bool), RedisError> { +pub fn url_is_unix_socket(url: &Url) -> bool { + url.scheme().ends_with(UNIX_SCHEME_SUFFIX) +} + +pub fn parse_url(url: &str, default_port: Option) -> Result<(Url, String, u16, bool), Error> { let url = Url::parse(url)?; let host = if let Some(host) = url.host_str() { host.to_owned() } else { - return Err(RedisError::new(RedisErrorKind::Config, "Invalid or missing host.")); + return Err(Error::new(ErrorKind::Config, "Invalid or missing host.")); }; let port = if let Some(port) = url.port().or(default_port) { port } else { - return Err(RedisError::new(RedisErrorKind::Config, "Invalid or missing port.")); + return Err(Error::new(ErrorKind::Config, "Invalid or missing port.")); }; let tls = url_uses_tls(&url); @@ -751,18 +732,14 @@ pub fn parse_url(url: &str, default_port: Option) -> Result<(Url, String, u Ok((url, host, port, tls)) } -pub fn url_is_unix_socket(url: &Url) -> bool { - url.scheme() == "redis+unix" -} - #[cfg(feature = "unix-sockets")] -pub fn parse_unix_url(url: &str) -> Result<(Url, PathBuf), RedisError> { +pub fn parse_unix_url(url: &str) -> Result<(Url, PathBuf), Error> { let url = Url::parse(url)?; let path: PathBuf = url.path().into(); Ok((url, path)) } -pub fn parse_url_db(url: &Url) -> Result, RedisError> { +pub fn parse_url_db(url: &Url) -> Result, Error> { let parts: Vec<&str> = if let Some(parts) = url.path_segments() { parts.collect() } else { @@ -770,7 +747,7 @@ pub fn parse_url_db(url: 
&Url) -> Result, RedisError> { }; if parts.len() > 1 { - return Err(RedisError::new(RedisErrorKind::Config, "Invalid database path.")); + return Err(Error::new(ErrorKind::Config, "Invalid database path.")); } else if parts.is_empty() { return Ok(None); } @@ -782,7 +759,7 @@ pub fn parse_url_db(url: &Url) -> Result, RedisError> { Ok(Some(parts[0].parse()?)) } -pub fn parse_url_credentials(url: &Url) -> Result<(Option, Option), RedisError> { +pub fn parse_url_credentials(url: &Url) -> Result<(Option, Option), Error> { let username = if url.username().is_empty() { None } else { @@ -799,15 +776,15 @@ pub fn parse_url_credentials(url: &Url) -> Result<(Option, Option Result, RedisError> { +pub fn parse_url_other_nodes(url: &Url) -> Result, Error> { let mut out = Vec::new(); for (key, value) in url.query_pairs().into_iter() { if key == CLUSTER_NODE_QUERY { let parts: Vec<&str> = value.split(':').collect(); if parts.len() != 2 { - return Err(RedisError::new( - RedisErrorKind::Config, + return Err(Error::new( + ErrorKind::Config, format!("Invalid host:port for cluster node: {}", value), )); } @@ -821,15 +798,15 @@ pub fn parse_url_other_nodes(url: &Url) -> Result, RedisError> { Ok(out) } -pub fn parse_url_sentinel_service_name(url: &Url) -> Result { +pub fn parse_url_sentinel_service_name(url: &Url) -> Result { for (key, value) in url.query_pairs().into_iter() { if key == SENTINEL_NAME_QUERY { return Ok(value.to_string()); } } - Err(RedisError::new( - RedisErrorKind::Config, + Err(Error::new( + ErrorKind::Config, "Invalid or missing sentinel service name query parameter.", )) } @@ -856,18 +833,14 @@ pub fn parse_url_sentinel_password(url: &Url) -> Option { }) } -pub async fn clear_backchannel_state(inner: &RefCount) { - inner.backchannel.write().await.clear_router_state(inner).await; -} - /// Send QUIT to the servers and clean up the old router task's state. 
-fn close_router_channel(inner: &RefCount, command_tx: RefCount) { +fn close_router_channel(inner: &RefCount, command_tx: RefCount) { inner.notifications.broadcast_close(); inner.reset_server_state(); - let command = RedisCommand::new(RedisCommandKind::Quit, vec![]); + let command = Command::new(CommandKind::Quit, vec![]); inner.counters.incr_cmd_buffer_len(); - if let Err(_) = command_tx.send(command.into()) { + if let Err(_) = command_tx.try_send(command.into()) { inner.counters.decr_cmd_buffer_len(); _warn!(inner, "Failed to send QUIT when dropping old command channel."); } @@ -876,19 +849,19 @@ fn close_router_channel(inner: &RefCount, command_tx: RefCount #[cfg(test)] mod tests { use super::*; - use crate::{error::RedisError, types::RedisValue}; + use crate::{error::Error, types::Value}; use std::{convert::TryInto, fmt::Debug}; - fn m(v: V) -> RedisValue + fn m(v: V) -> Value where - V: TryInto + Debug, - V::Error: Into + Debug, + V: TryInto + Debug, + V::Error: Into + Debug, { v.try_into().unwrap() } - fn a(v: Vec) -> RedisValue { - RedisValue::Array(v) + fn a(v: Vec) -> Value { + Value::Array(v) } #[test] @@ -910,7 +883,7 @@ mod tests { // 2) 1) "1643479925582-0" // 2) 1) "count" // 2) "6" - let actual: RedisValue = vec![ + let actual: Value = vec![ a(vec![ m("foo"), a(vec![a(vec![m("1643479650336-0"), a(vec![m("count"), m(3)])])]), @@ -927,7 +900,7 @@ mod tests { .collect(); // flatten the top level nested array into something that can be cast to a map - let expected: RedisValue = vec![ + let expected: Value = vec![ m("foo"), a(vec![a(vec![m("1643479650336-0"), a(vec![m("count"), m(3)])])]), m("bar"), diff --git a/tests/README.md b/tests/README.md index ecf34e24..54bbe49b 100644 --- a/tests/README.md +++ b/tests/README.md @@ -2,9 +2,8 @@ Tests are organized by category, similar to the [commands](../src/commands) folder. 
-By default, most tests run 8 times based on the following configuration parameters: clustered vs centralized servers, -pipelined vs non-pipelined clients, and RESP2 vs RESP3 mode. Helper macros exist to make this easy so each test only has -to be written once. +By default, most tests run 4 times against a cluster and centralized deployments in RESP2 and RESP3 modes. Helper macros +exist to make this easy so each test only has to be written once. **The tests require Redis version >=6.2** As of writing the default version used is 7.2.4. @@ -81,8 +80,8 @@ node in a cluster. 5. Use `centralized_test!` or `cluster_test!` to generate tests in the appropriate module. Centralized tests will be converted to sentinel tests or redis-stack tests if needed. -Tests that use this pattern will run 8 times to check the functionality against clustered and centralized redis servers -with using both pipelined and non-pipelined clients in RESP2 and RESP3 mode. +Tests that use this pattern will run 4 times to check the functionality against clustered and centralized redis servers +in RESP2 and RESP3 mode. ## Notes diff --git a/tests/docker/compose/base.yml b/tests/docker/compose/base.yml index e632a2b4..7e336bda 100644 --- a/tests/docker/compose/base.yml +++ b/tests/docker/compose/base.yml @@ -8,11 +8,11 @@ services: debug: depends_on: - redis-cluster-tls-6 - # - redis-main + - redis-main # - redis-stack-main - redis-cluster-6 - redis-sentinel-3 - - valkey-main + # - valkey-main #- valkey-cluster-6 container_name: "debug" build: diff --git a/tests/docker/runners/bash/all-features.sh b/tests/docker/runners/bash/all-features.sh index b1d3f757..5ab31598 100755 --- a/tests/docker/runners/bash/all-features.sh +++ b/tests/docker/runners/bash/all-features.sh @@ -15,7 +15,7 @@ done # those features individually. 
FEATURES="network-logs custom-reconnect-errors serde-json blocking-encoding full-tracing monitor metrics sentinel-client subscriber-client dns debug-ids - replicas sha-1 transactions i-all credential-provider specialize-into-bytes" + replicas sha-1 transactions i-all credential-provider" if [ -z "$FRED_CI_NEXTEST" ]; then cargo test --release --lib --tests --features "$FEATURES" -- --test-threads=1 "$@" diff --git a/tests/docker/runners/bash/redis-stack.sh b/tests/docker/runners/bash/redis-stack.sh index b8eaa3f7..688a01e6 100755 --- a/tests/docker/runners/bash/redis-stack.sh +++ b/tests/docker/runners/bash/redis-stack.sh @@ -10,7 +10,7 @@ do fi done -FEATURES="network-logs serde-json debug-ids i-redis-stack i-all" +FEATURES="network-logs serde-json debug-ids i-redis-stack i-all i-hexpire" if [ -z "$FRED_CI_NEXTEST" ]; then cargo test --release --lib --tests --features "$FEATURES" -- --test-threads=1 "$@" diff --git a/tests/docker/runners/images/debug.dockerfile b/tests/docker/runners/images/debug.dockerfile index 1986bd83..b2593507 100644 --- a/tests/docker/runners/images/debug.dockerfile +++ b/tests/docker/runners/images/debug.dockerfile @@ -19,9 +19,10 @@ ARG FRED_REDIS_SENTINEL_HOST ARG FRED_REDIS_SENTINEL_PORT ARG CIRCLECI_TESTS -RUN USER=root apt-get update && apt-get install -y build-essential libssl-dev dnsutils curl pkg-config cmake git +RUN USER=root apt-get update && apt-get install -y build-essential libssl-dev dnsutils curl pkg-config cmake git vim linux-perf RUN echo "REDIS_VERSION=$REDIS_VERSION" # For debugging RUN cargo --version && rustc --version -RUN rustup component add clippy && rustup install nightly \ No newline at end of file +RUN rustup component add clippy && rustup install nightly +RUN cargo install flamegraph \ No newline at end of file diff --git a/tests/integration/acl/mod.rs b/tests/integration/acl/mod.rs index 52a3673f..7d7ecefb 100644 --- a/tests/integration/acl/mod.rs +++ b/tests/integration/acl/mod.rs @@ -1,9 +1,9 @@ use 
super::utils::{read_env_var, should_use_sentinel_config}; use fred::{ - clients::RedisClient, - error::RedisError, + clients::Client, + error::Error, interfaces::*, - types::{RedisConfig, RedisValue}, + types::{config::Config, Value}, }; use std::collections::HashMap; @@ -22,33 +22,33 @@ fn check_env_creds() -> (Option, Option) { } // note: currently this only works in CI against the centralized server -pub async fn should_auth_as_test_user(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_auth_as_test_user(client: Client, _: Config) -> Result<(), Error> { let (username, password) = check_env_creds(); if let Some(password) = password { - client.auth(username, password).await?; - client.ping().await?; + let _: () = client.auth(username, password).await?; + let _: () = client.ping(None).await?; } Ok(()) } // FIXME currently this only works in CI against the centralized server -pub async fn should_auth_as_test_user_via_config(_: RedisClient, mut config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_auth_as_test_user_via_config(_: Client, mut config: Config) -> Result<(), Error> { let (username, password) = check_env_creds(); if let Some(password) = password { config.username = username; config.password = Some(password); - let client = RedisClient::new(config, None, None, None); + let client = Client::new(config, None, None, None); client.connect(); client.wait_for_connect().await?; - client.ping().await?; + let _: () = client.ping(None).await?; } Ok(()) } -pub async fn should_run_acl_getuser(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let user: HashMap = client.acl_getuser("default").await?; +pub async fn should_run_acl_getuser(client: Client, _: Config) -> Result<(), Error> { + let user: HashMap = client.acl_getuser("default").await?; let flags: Vec = user.get("flags").unwrap().clone().convert()?; assert!(flags.contains(&"on".to_string())); diff --git a/tests/integration/centralized.rs 
b/tests/integration/centralized.rs index 94b3ab3e..7f7f1650 100644 --- a/tests/integration/centralized.rs +++ b/tests/integration/centralized.rs @@ -72,7 +72,7 @@ mod other { #[cfg(all(feature = "i-client", feature = "i-lists"))] centralized_test!(other, should_error_when_blocked); #[cfg(all(feature = "i-keys", feature = "i-hashes"))] - centralized_test!(other, should_smoke_test_from_redis_impl); + centralized_test!(other, should_smoke_test_from_value_impl); #[cfg(feature = "i-keys")] centralized_test!(other, should_pipeline_all); #[cfg(all(feature = "i-keys", feature = "i-hashes"))] @@ -133,7 +133,9 @@ mod hashes { centralized_test!(hashes, should_get_random_field); centralized_test!(hashes, should_get_strlen); centralized_test!(hashes, should_get_values); + #[cfg(feature = "i-hexpire")] centralized_test!(hashes, should_do_hash_expirations); + #[cfg(feature = "i-hexpire")] centralized_test!(hashes, should_do_hash_pexpirations); } @@ -170,6 +172,8 @@ mod scanning { centralized_test!(scanning, should_scan_buffered); #[cfg(feature = "i-keys")] centralized_test!(scanning, should_continue_scanning_on_page_drop); + #[cfg(feature = "i-keys")] + centralized_test!(scanning, should_scan_by_page_centralized); } #[cfg(feature = "i-slowlog")] @@ -183,12 +187,12 @@ mod slowlog { mod server { centralized_test!(server, should_flushall); centralized_test!(server, should_read_server_info); - centralized_test!(server, should_ping_server); - centralized_test!(server, should_run_custom_command); + centralized_test!(server, should_ping_pong_command); centralized_test!(server, should_read_last_save); centralized_test!(server, should_read_db_size); centralized_test!(server, should_start_bgsave); centralized_test!(server, should_do_bgrewriteaof); + centralized_test!(server, should_select_index_command); } #[cfg(feature = "i-sets")] @@ -274,6 +278,7 @@ pub mod sorted_sets { centralized_test!(sorted_sets, should_zrangebyscore); centralized_test!(sorted_sets, should_zrevrangebyscore); 
centralized_test!(sorted_sets, should_zrank_values); + centralized_test!(sorted_sets, should_zrank_values_withscore); centralized_test!(sorted_sets, should_zrem_values); centralized_test!(sorted_sets, should_zremrangebylex); centralized_test!(sorted_sets, should_zremrangebyrank); diff --git a/tests/integration/client/mod.rs b/tests/integration/client/mod.rs index 1bb96e11..2c49e7ef 100644 --- a/tests/integration/client/mod.rs +++ b/tests/integration/client/mod.rs @@ -1,7 +1,7 @@ use fred::prelude::*; #[cfg(feature = "i-client")] -pub async fn should_echo_message(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_echo_message(client: Client, _: Config) -> Result<(), Error> { let res: String = client.echo("hello world!").await?; assert_eq!(res, "hello world!"); Ok(()) diff --git a/tests/integration/cluster/mod.rs b/tests/integration/cluster/mod.rs index 36f5eed7..a1a076f5 100644 --- a/tests/integration/cluster/mod.rs +++ b/tests/integration/cluster/mod.rs @@ -1,9 +1,9 @@ #![allow(unused_imports)] -use fred::{error::RedisError, interfaces::*, prelude::RedisClient, types::RedisConfig}; +use fred::{error::Error, interfaces::*, prelude::Client, types::config::Config}; #[cfg(all(feature = "i-cluster", feature = "i-client"))] -pub async fn should_use_each_cluster_node(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let connections = client.active_connections().await?; +pub async fn should_use_each_cluster_node(client: Client, _: Config) -> Result<(), Error> { + let connections = client.active_connections(); let mut servers = Vec::new(); for server in connections.iter() { diff --git a/tests/integration/clustered.rs b/tests/integration/clustered.rs index 83a20ea9..4dbf72cd 100644 --- a/tests/integration/clustered.rs +++ b/tests/integration/clustered.rs @@ -104,10 +104,14 @@ mod other { cluster_test!(other, should_replica_set_and_get); #[cfg(all(feature = "replicas", feature = "i-keys"))] cluster_test!(other, 
should_replica_set_and_get_not_lazy); + #[cfg(feature = "replicas")] + cluster_test!(other, should_create_non_lazy_replica_connections); #[cfg(all(feature = "replicas", feature = "i-keys"))] cluster_test!(other, should_use_cluster_replica_without_redirection); - //#[cfg(all(feature = "replicas", feature = "i-keys"))] - // cluster_test!(other, should_combine_options_and_replicas); + #[cfg(all(feature = "replicas", feature = "i-keys"))] + cluster_test!(other, should_combine_options_and_replicas_non_lazy); + #[cfg(all(feature = "replicas", feature = "i-keys"))] + cluster_test!(other, should_combine_options_and_replicas); #[cfg(all(feature = "replicas", feature = "i-keys"))] cluster_test!(other, should_pipeline_with_replicas); } @@ -136,7 +140,9 @@ mod hashes { cluster_test!(hashes, should_get_random_field); cluster_test!(hashes, should_get_strlen); cluster_test!(hashes, should_get_values); + #[cfg(feature = "i-hexpire")] cluster_test!(hashes, should_do_hash_expirations); + #[cfg(feature = "i-hexpire")] cluster_test!(hashes, should_do_hash_pexpirations); } @@ -179,6 +185,8 @@ mod scanning { cluster_test!(scanning, should_scan_cluster_buffered); #[cfg(feature = "i-keys")] cluster_test!(scanning, should_continue_scanning_on_page_drop); + #[cfg(all(feature = "i-keys", feature = "i-cluster"))] + cluster_test!(scanning, should_scan_by_page_clustered); } #[cfg(feature = "i-slowlog")] @@ -192,8 +200,7 @@ mod slowlog { mod server { cluster_test!(server, should_flushall); cluster_test!(server, should_read_server_info); - cluster_test!(server, should_ping_server); - cluster_test!(server, should_run_custom_command); + cluster_test!(server, should_ping_pong_command); cluster_test!(server, should_read_last_save); cluster_test!(server, should_read_db_size); cluster_test!(server, should_start_bgsave); @@ -285,6 +292,7 @@ pub mod sorted_sets { cluster_test!(sorted_sets, should_zrangebyscore); cluster_test!(sorted_sets, should_zrevrangebyscore); cluster_test!(sorted_sets, 
should_zrank_values); + cluster_test!(sorted_sets, should_zrank_values_withscore); cluster_test!(sorted_sets, should_zrem_values); cluster_test!(sorted_sets, should_zremrangebylex); cluster_test!(sorted_sets, should_zremrangebyrank); diff --git a/tests/integration/geo/mod.rs b/tests/integration/geo/mod.rs index 0bbf9fc7..65a5a3b5 100644 --- a/tests/integration/geo/mod.rs +++ b/tests/integration/geo/mod.rs @@ -1,6 +1,9 @@ use fred::{ prelude::*, - types::{GeoPosition, GeoRadiusInfo, GeoUnit, GeoValue, SortOrder}, + types::{ + geo::{GeoPosition, GeoRadiusInfo, GeoUnit, GeoValue}, + SortOrder, + }, }; use std::convert::TryInto; @@ -14,7 +17,7 @@ fn loose_eq_pos(lhs: &GeoPosition, rhs: &GeoPosition) -> bool { loose_eq(lhs.longitude, rhs.longitude, 5) && loose_eq(lhs.latitude, rhs.latitude, 5) } -async fn create_fake_data(client: &RedisClient, key: &str) -> Result, RedisError> { +async fn create_fake_data(client: &Client, key: &str) -> Result, Error> { // GEOADD key 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" let values = vec![ @@ -22,11 +25,11 @@ async fn create_fake_data(client: &RedisClient, key: &str) -> Result Result<(), RedisError> { +pub async fn should_geoadd_values(client: Client, _: Config) -> Result<(), Error> { let values: Vec = vec![ (13.361389, 38.115556, "Palermo").try_into()?, (15.087269, 37.502669, "Catania").try_into()?, @@ -42,7 +45,7 @@ pub async fn should_geoadd_values(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_geohash_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_geohash_values(client: Client, _: Config) -> Result<(), Error> { let _ = create_fake_data(&client, "foo").await?; let result: String = client.geohash("foo", "Palermo").await?; @@ -56,10 +59,10 @@ pub async fn should_geohash_values(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -pub async fn should_geopos_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async 
fn should_geopos_values(client: Client, _: Config) -> Result<(), Error> { let expected = create_fake_data(&client, "foo").await?; - let result: RedisValue = client.geopos("foo", vec!["Palermo", "Catania"]).await?; + let result: Value = client.geopos("foo", vec!["Palermo", "Catania"]).await?; let result: Vec = result .into_array() .into_iter() @@ -71,18 +74,18 @@ pub async fn should_geopos_values(client: RedisClient, _: RedisConfig) -> Result } } - let result: Vec = client.geopos("foo", "Palermo").await?; + let result: Vec = client.geopos("foo", "Palermo").await?; let result = result[0].as_geo_position().unwrap().unwrap(); assert!(loose_eq_pos(&result, &expected[0])); - let result: Vec = client.geopos("foo", "Catania").await?; + let result: Vec = client.geopos("foo", "Catania").await?; let result = result[0].as_geo_position().unwrap().unwrap(); assert!(loose_eq_pos(&result, &expected[1])); Ok(()) } -pub async fn should_geodist_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_geodist_values(client: Client, _: Config) -> Result<(), Error> { let _ = create_fake_data(&client, "foo").await?; let result: f64 = client.geodist("foo", "Palermo", "Catania", None).await?; @@ -99,11 +102,11 @@ pub async fn should_geodist_values(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -pub async fn should_georadius_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_georadius_values(client: Client, _: Config) -> Result<(), Error> { let _ = create_fake_data(&client, "foo").await?; let result = client - .georadius::( + .georadius::( "foo", (15.0, 37.0), 200.0, @@ -135,7 +138,7 @@ pub async fn should_georadius_values(client: RedisClient, _: RedisConfig) -> Res assert_eq!(result, expected); let result = client - .georadius::( + .georadius::( "foo", (15.0, 37.0), 200.0, @@ -167,7 +170,7 @@ pub async fn should_georadius_values(client: RedisClient, _: RedisConfig) -> Res assert_eq!(result, expected); let 
result = client - .georadius::( + .georadius::( "foo", (15.0, 37.0), 200.0, @@ -201,13 +204,13 @@ pub async fn should_georadius_values(client: RedisClient, _: RedisConfig) -> Res Ok(()) } -pub async fn should_georadiusbymember_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_georadiusbymember_values(client: Client, _: Config) -> Result<(), Error> { let _ = create_fake_data(&client, "foo").await?; let agrigento: GeoValue = (13.583333, 37.316667, "Agrigento").try_into()?; - client.geoadd("foo", None, false, agrigento).await?; + let _: () = client.geoadd("foo", None, false, agrigento).await?; let result = client - .georadiusbymember::( + .georadiusbymember::( "foo", "Agrigento", 100.0, @@ -241,17 +244,17 @@ pub async fn should_georadiusbymember_values(client: RedisClient, _: RedisConfig Ok(()) } -pub async fn should_geosearch_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_geosearch_values(client: Client, _: Config) -> Result<(), Error> { let _ = create_fake_data(&client, "foo").await?; let values = vec![ (12.758489, 38.788135, "edge1").try_into()?, (17.241510, 38.788135, "edge2").try_into()?, ]; - client.geoadd("foo", None, false, values).await?; + let _: () = client.geoadd("foo", None, false, values).await?; let lonlat: GeoPosition = (15.0, 37.0).into(); let result = client - .geosearch::( + .geosearch::( "foo", None, Some(lonlat.clone()), @@ -282,7 +285,7 @@ pub async fn should_geosearch_values(client: RedisClient, _: RedisConfig) -> Res assert_eq!(result, expected); let result = client - .geosearch::( + .geosearch::( "foo", None, Some(lonlat), diff --git a/tests/integration/hashes/mod.rs b/tests/integration/hashes/mod.rs index e7e5b276..d4b12d80 100644 --- a/tests/integration/hashes/mod.rs +++ b/tests/integration/hashes/mod.rs @@ -1,9 +1,9 @@ use crate::integration::utils; use fred::{ - clients::RedisClient, - error::RedisError, + clients::Client, + error::Error, 
interfaces::*, - types::{RedisConfig, RedisValue}, + types::{config::Config, Value}, }; use std::{ collections::{HashMap, HashSet}, @@ -20,8 +20,8 @@ fn assert_contains(values: Vec, item: &T) { panic!("Failed to find item in set."); } -fn assert_diff_len(values: Vec<&'static str>, value: RedisValue, len: usize) { - if let RedisValue::Array(items) = value { +fn assert_diff_len(values: Vec<&'static str>, value: Value, len: usize) { + if let Value::Array(items) = value { let mut expected = HashSet::with_capacity(values.len()); for value in values.into_iter() { expected.insert(value.to_owned()); @@ -39,7 +39,7 @@ fn assert_diff_len(values: Vec<&'static str>, value: RedisValue, len: usize) { } } -pub async fn should_hset_and_hget(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_hset_and_hget(client: Client, _: Config) -> Result<(), Error> { let result: i64 = client.hset("foo", ("a", 1)).await?; assert_eq!(result, 1); let result: i64 = client.hset("foo", vec![("b", 2), ("c", 3)]).await?; @@ -55,7 +55,7 @@ pub async fn should_hset_and_hget(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_hset_and_hdel(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_hset_and_hdel(client: Client, _: Config) -> Result<(), Error> { let result: i64 = client.hset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; assert_eq!(result, 3); let result: i64 = client.hdel("foo", vec!["a", "b"]).await?; @@ -68,8 +68,8 @@ pub async fn should_hset_and_hdel(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_hexists(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.hset("foo", ("a", 1)).await?; +pub async fn should_hexists(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.hset("foo", ("a", 1)).await?; let a: bool = client.hexists("foo", "a").await?; assert!(a); let b: bool = client.hexists("foo", "b").await?; @@ -78,8 +78,8 @@ pub 
async fn should_hexists(client: RedisClient, _: RedisConfig) -> Result<(), R Ok(()) } -pub async fn should_hgetall(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.hset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; +pub async fn should_hgetall(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.hset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let values: HashMap = client.hgetall("foo").await?; assert_eq!(values.len(), 3); @@ -92,7 +92,7 @@ pub async fn should_hgetall(client: RedisClient, _: RedisConfig) -> Result<(), R Ok(()) } -pub async fn should_hincryby(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_hincryby(client: Client, _: Config) -> Result<(), Error> { let result: i64 = client.hincrby("foo", "a", 1).await?; assert_eq!(result, 1); let result: i64 = client.hincrby("foo", "a", 2).await?; @@ -101,7 +101,7 @@ pub async fn should_hincryby(client: RedisClient, _: RedisConfig) -> Result<(), Ok(()) } -pub async fn should_hincryby_float(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_hincryby_float(client: Client, _: Config) -> Result<(), Error> { let result: f64 = client.hincrbyfloat("foo", "a", 0.5).await?; assert_eq!(result, 0.5); let result: f64 = client.hincrbyfloat("foo", "a", 3.7).await?; @@ -110,8 +110,8 @@ pub async fn should_hincryby_float(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -pub async fn should_get_keys(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.hset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; +pub async fn should_get_keys(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.hset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let keys = client.hkeys("foo").await?; assert_diff_len(vec!["a", "b", "c"], keys, 0); @@ -119,8 +119,8 @@ pub async fn should_get_keys(client: RedisClient, _: RedisConfig) -> Result<(), Ok(()) } -pub async fn 
should_hmset(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.hmset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; +pub async fn should_hmset(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.hmset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let a: i64 = client.hget("foo", "a").await?; assert_eq!(a, 1); @@ -132,8 +132,8 @@ pub async fn should_hmset(client: RedisClient, _: RedisConfig) -> Result<(), Red Ok(()) } -pub async fn should_hmget(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.hmset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; +pub async fn should_hmget(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.hmset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let result: Vec = client.hmget("foo", vec!["a", "b"]).await?; assert_eq!(result, vec![1, 2]); @@ -141,8 +141,8 @@ pub async fn should_hmget(client: RedisClient, _: RedisConfig) -> Result<(), Red Ok(()) } -pub async fn should_hsetnx(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.hset("foo", ("a", 1)).await?; +pub async fn should_hsetnx(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.hset("foo", ("a", 1)).await?; let result: bool = client.hsetnx("foo", "a", 2).await?; assert!(!result); let result: i64 = client.hget("foo", "a").await?; @@ -155,8 +155,8 @@ pub async fn should_hsetnx(client: RedisClient, _: RedisConfig) -> Result<(), Re Ok(()) } -pub async fn should_get_random_field(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.hmset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; +pub async fn should_get_random_field(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.hmset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let field: String = client.hrandfield("foo", None).await?; assert_contains(vec!["a", "b", "c"], &field.as_str()); @@ -180,9 +180,9 @@ pub async fn should_get_random_field(client: 
RedisClient, _: RedisConfig) -> Res Ok(()) } -pub async fn should_get_strlen(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_get_strlen(client: Client, _: Config) -> Result<(), Error> { let expected = "abcdefhijklmnopqrstuvwxyz"; - client.hset("foo", ("a", expected)).await?; + let _: () = client.hset("foo", ("a", expected)).await?; let len: usize = client.hstrlen("foo", "a").await?; assert_eq!(len, expected.len()); @@ -190,16 +190,17 @@ pub async fn should_get_strlen(client: RedisClient, _: RedisConfig) -> Result<() Ok(()) } -pub async fn should_get_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.hmset("foo", vec![("a", "1"), ("b", "2")]).await?; +pub async fn should_get_values(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.hmset("foo", vec![("a", "1"), ("b", "2")]).await?; - let values: RedisValue = client.hvals("foo").await?; + let values: Value = client.hvals("foo").await?; assert_diff_len(vec!["1", "2"], values, 0); Ok(()) } -pub async fn should_do_hash_expirations(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +#[cfg(feature = "i-hexpire")] +pub async fn should_do_hash_expirations(client: Client, _: Config) -> Result<(), Error> { if utils::check_valkey(&client).await { return Ok(()); } @@ -230,7 +231,8 @@ pub async fn should_do_hash_expirations(client: RedisClient, _: RedisConfig) -> Ok(()) } -pub async fn should_do_hash_pexpirations(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +#[cfg(feature = "i-hexpire")] +pub async fn should_do_hash_pexpirations(client: Client, _: Config) -> Result<(), Error> { if utils::check_valkey(&client).await { return Ok(()); } diff --git a/tests/integration/hyperloglog/mod.rs b/tests/integration/hyperloglog/mod.rs index f39c491d..f10bcbb6 100644 --- a/tests/integration/hyperloglog/mod.rs +++ b/tests/integration/hyperloglog/mod.rs @@ -1,6 +1,6 @@ use fred::prelude::*; -pub async fn 
should_pfadd_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_pfadd_elements(client: Client, _: Config) -> Result<(), Error> { let result: i64 = client.pfadd("foo", vec!["a", "b"]).await?; assert_eq!(result, 1); let result: i64 = client.pfadd("foo", "a").await?; @@ -9,7 +9,7 @@ pub async fn should_pfadd_elements(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -pub async fn should_pfcount_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_pfcount_elements(client: Client, _: Config) -> Result<(), Error> { let result: i64 = client.pfadd("foo", vec!["a", "b", "c"]).await?; assert_eq!(result, 1); let result: i64 = client.pfcount("foo").await?; @@ -22,13 +22,13 @@ pub async fn should_pfcount_elements(client: RedisClient, _: RedisConfig) -> Res Ok(()) } -pub async fn should_pfmerge_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_pfmerge_elements(client: Client, _: Config) -> Result<(), Error> { let result: i64 = client.pfadd("foo{1}", vec!["a", "b", "c"]).await?; assert_eq!(result, 1); let result: i64 = client.pfadd("bar{1}", vec!["c", "d", "e"]).await?; assert_eq!(result, 1); - client.pfmerge("baz{1}", vec!["foo{1}", "bar{1}"]).await?; + let _: () = client.pfmerge("baz{1}", vec!["foo{1}", "bar{1}"]).await?; let result: i64 = client.pfcount("baz{1}").await?; assert_eq!(result, 5); diff --git a/tests/integration/keys/mod.rs b/tests/integration/keys/mod.rs index 6655a376..8a73b287 100644 --- a/tests/integration/keys/mod.rs +++ b/tests/integration/keys/mod.rs @@ -1,34 +1,40 @@ use bytes::Bytes; use fred::{ - clients::{RedisClient, RedisPool}, - error::RedisError, + clients::{Client, Pool}, + error::Error, interfaces::*, - types::{Expiration, ReconnectPolicy, RedisConfig, RedisMap, RedisValue}, + types::{ + config::{Config, ReconnectPolicy}, + Expiration, + ExpireOptions, + Map, + Value, + }, }; use futures::{pin_mut, StreamExt}; use 
std::{collections::HashMap, time::Duration}; use tokio::{self, time::sleep}; #[cfg(feature = "default-nil-types")] -pub async fn should_handle_missing_keys(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_handle_missing_keys(client: Client, _: Config) -> Result<(), Error> { assert!(client.get::("foo").await?.is_empty()); Ok(()) } #[cfg(not(feature = "default-nil-types"))] -pub async fn should_handle_missing_keys(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_handle_missing_keys(client: Client, _: Config) -> Result<(), Error> { assert!(client.get::("foo").await.is_err()); Ok(()) } -pub async fn should_set_and_get_a_value(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - client.set("foo", "bar", None, None, false).await?; +pub async fn should_set_and_get_a_value(client: Client, _config: Config) -> Result<(), Error> { + let _: () = client.set("foo", "bar", None, None, false).await?; assert_eq!(client.get::("foo").await?, "bar"); Ok(()) } -pub async fn should_set_and_del_a_value(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_set_and_del_a_value(client: Client, _config: Config) -> Result<(), Error> { let result: Option = client.set("foo", "bar", None, None, true).await?; assert!(result.is_none()); @@ -38,8 +44,8 @@ pub async fn should_set_and_del_a_value(client: RedisClient, _config: RedisConfi Ok(()) } -pub async fn should_set_with_get_argument(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - client.set("foo", "bar", None, None, false).await?; +pub async fn should_set_with_get_argument(client: Client, _config: Config) -> Result<(), Error> { + let _: () = client.set("foo", "bar", None, None, false).await?; let result: String = client.set("foo", "baz", None, None, true).await?; assert_eq!(result, "bar"); @@ -50,39 +56,36 @@ pub async fn should_set_with_get_argument(client: RedisClient, _config: RedisCon 
Ok(()) } -pub async fn should_rename(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - client.set("{foo}.1", "baz", None, None, false).await?; +pub async fn should_rename(client: Client, _config: Config) -> Result<(), Error> { + let _: () = client.set("{foo}.1", "baz", None, None, false).await?; - client.rename("{foo}.1", "{foo}.2").await?; + let _: () = client.rename("{foo}.1", "{foo}.2").await?; let result: String = client.get("{foo}.2").await?; assert_eq!(result, "baz"); Ok(()) } -pub async fn should_error_rename_does_not_exist(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_error_rename_does_not_exist(client: Client, _config: Config) -> Result<(), Error> { client.rename("{foo}", "{foo}.bar").await } -pub async fn should_renamenx(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - client.set("{foo}.1", "baz", None, None, false).await?; +pub async fn should_renamenx(client: Client, _config: Config) -> Result<(), Error> { + let _: () = client.set("{foo}.1", "baz", None, None, false).await?; - client.renamenx("{foo}.1", "{foo}.2").await?; + let _: () = client.renamenx("{foo}.1", "{foo}.2").await?; let result: String = client.get("{foo}.2").await?; assert_eq!(result, "baz"); Ok(()) } -pub async fn should_error_renamenx_does_not_exist( - client: RedisClient, - _config: RedisConfig, -) -> Result<(), RedisError> { +pub async fn should_error_renamenx_does_not_exist(client: Client, _config: Config) -> Result<(), Error> { client.renamenx("{foo}", "{foo}.bar").await } -pub async fn should_unlink(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - client.set("{foo}1", "bar", None, None, false).await?; +pub async fn should_unlink(client: Client, _config: Config) -> Result<(), Error> { + let _: () = client.set("{foo}1", "bar", None, None, false).await?; assert_eq!(client.get::("{foo}1").await?, "bar"); assert_eq!( @@ -95,7 +98,7 @@ pub async fn should_unlink(client: 
RedisClient, _config: RedisConfig) -> Result< Ok(()) } -pub async fn should_incr_and_decr_a_value(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_incr_and_decr_a_value(client: Client, _config: Config) -> Result<(), Error> { let count: u64 = client.incr("foo").await?; assert_eq!(count, 1); let count: u64 = client.incr_by("foo", 2).await?; @@ -108,7 +111,7 @@ pub async fn should_incr_and_decr_a_value(client: RedisClient, _config: RedisCon Ok(()) } -pub async fn should_incr_by_float(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_incr_by_float(client: Client, _config: Config) -> Result<(), Error> { let count: f64 = client.incr_by_float("foo", 1.5).await?; assert_eq!(count, 1.5); let count: f64 = client.incr_by_float("foo", 2.2).await?; @@ -119,8 +122,8 @@ pub async fn should_incr_by_float(client: RedisClient, _config: RedisConfig) -> Ok(()) } -pub async fn should_mset_a_non_empty_map(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - let mut map: HashMap = HashMap::new(); +pub async fn should_mset_a_non_empty_map(client: Client, _config: Config) -> Result<(), Error> { + let mut map: HashMap = HashMap::new(); // MSET args all have to map to the same cluster node map.insert("a{1}".into(), 1.into()); map.insert("b{1}".into(), 2.into()); @@ -139,23 +142,25 @@ pub async fn should_mset_a_non_empty_map(client: RedisClient, _config: RedisConf } // should panic -pub async fn should_error_mset_empty_map(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - client.mset(RedisMap::new()).await.map(|_| ()) +pub async fn should_error_mset_empty_map(client: Client, _config: Config) -> Result<(), Error> { + client.mset(Map::new()).await.map(|_| ()) } -pub async fn should_expire_key(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - client.set("foo", "bar", None, None, false).await?; +pub async fn should_expire_key(client: Client, 
_config: Config) -> Result<(), Error> { + let _: () = client.set("foo", "bar", None, None, false).await?; - client.expire("foo", 1).await?; - sleep(Duration::from_millis(1500)).await; + let _: () = client.expire("foo", 2, None).await?; + let res: i64 = client.expire("foo", 1, Some(ExpireOptions::GT)).await?; + assert_eq!(res, 0); + sleep(Duration::from_millis(2500)).await; let foo: Option = client.get("foo").await?; assert!(foo.is_none()); Ok(()) } -pub async fn should_persist_key(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - client.set("foo", "bar", Some(Expiration::EX(5)), None, false).await?; +pub async fn should_persist_key(client: Client, _config: Config) -> Result<(), Error> { + let _: () = client.set("foo", "bar", Some(Expiration::EX(5)), None, false).await?; let removed: bool = client.persist("foo").await?; assert!(removed); @@ -166,8 +171,8 @@ pub async fn should_persist_key(client: RedisClient, _config: RedisConfig) -> Re Ok(()) } -pub async fn should_check_ttl(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - client.set("foo", "bar", Some(Expiration::EX(5)), None, false).await?; +pub async fn should_check_ttl(client: Client, _config: Config) -> Result<(), Error> { + let _: () = client.set("foo", "bar", Some(Expiration::EX(5)), None, false).await?; let ttl: i64 = client.ttl("foo").await?; assert!(ttl > 0 && ttl < 6); @@ -175,8 +180,8 @@ pub async fn should_check_ttl(client: RedisClient, _config: RedisConfig) -> Resu Ok(()) } -pub async fn should_check_pttl(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - client.set("foo", "bar", Some(Expiration::EX(5)), None, false).await?; +pub async fn should_check_pttl(client: Client, _config: Config) -> Result<(), Error> { + let _: () = client.set("foo", "bar", Some(Expiration::EX(5)), None, false).await?; let ttl: i64 = client.pttl("foo").await?; assert!(ttl > 0 && ttl < 5001); @@ -184,42 +189,42 @@ pub async fn should_check_pttl(client: 
RedisClient, _config: RedisConfig) -> Res Ok(()) } -pub async fn should_dump_key(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.set("foo", "abc123", None, None, false).await?; - let dump: RedisValue = client.dump("foo").await?; +pub async fn should_dump_key(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.set("foo", "abc123", None, None, false).await?; + let dump: Value = client.dump("foo").await?; assert!(dump.is_bytes()); Ok(()) } -pub async fn should_dump_and_restore_key(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_dump_and_restore_key(client: Client, _: Config) -> Result<(), Error> { let expected = "abc123"; - client.set("foo", expected, None, None, false).await?; + let _: () = client.set("foo", expected, None, None, false).await?; let dump = client.dump("foo").await?; - client.del("foo").await?; + let _: () = client.del("foo").await?; - client.restore("foo", 0, dump, false, false, None, None).await?; + let _: () = client.restore("foo", 0, dump, false, false, None, None).await?; let value: String = client.get("foo").await?; assert_eq!(value, expected); Ok(()) } -pub async fn should_modify_ranges(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.set("foo", "0123456789", None, None, false).await?; +pub async fn should_modify_ranges(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.set("foo", "0123456789", None, None, false).await?; let range: String = client.getrange("foo", 0, 4).await?; assert_eq!(range, "01234"); - client.setrange("foo", 4, "abc").await?; + let _: () = client.setrange("foo", 4, "abc").await?; let value: String = client.get("foo").await?; assert_eq!(value, "0123abc789"); Ok(()) } -pub async fn should_getset_value(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_getset_value(client: Client, _: Config) -> Result<(), Error> { let value: Option = client.getset("foo", 
"bar").await?; assert!(value.is_none()); let value: String = client.getset("foo", "baz").await?; @@ -230,11 +235,11 @@ pub async fn should_getset_value(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_getdel_value(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_getdel_value(client: Client, _: Config) -> Result<(), Error> { let value: Option = client.getdel("foo").await?; assert!(value.is_none()); - client.set("foo", "bar", None, None, false).await?; + let _: () = client.set("foo", "bar", None, None, false).await?; let value: String = client.getdel("foo").await?; assert_eq!(value, "bar"); let value: Option = client.get("foo").await?; @@ -243,19 +248,19 @@ pub async fn should_getdel_value(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_get_strlen(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_get_strlen(client: Client, _: Config) -> Result<(), Error> { let expected = "abcdefghijklmnopqrstuvwxyz"; - client.set("foo", expected, None, None, false).await?; + let _: () = client.set("foo", expected, None, None, false).await?; let len: usize = client.strlen("foo").await?; assert_eq!(len, expected.len()); Ok(()) } -pub async fn should_mget_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let expected: Vec<(&str, RedisValue)> = vec![("a{1}", 1.into()), ("b{1}", 2.into()), ("c{1}", 3.into())]; +pub async fn should_mget_values(client: Client, _: Config) -> Result<(), Error> { + let expected: Vec<(&str, Value)> = vec![("a{1}", 1.into()), ("b{1}", 2.into()), ("c{1}", 3.into())]; for (key, value) in expected.iter() { - client.set(*key, value.clone(), None, None, false).await?; + let _: () = client.set(*key, value.clone(), None, None, false).await?; } let values: Vec = client.mget(vec!["a{1}", "b{1}", "c{1}"]).await?; assert_eq!(values, vec![1, 2, 3]); @@ -263,8 +268,8 @@ pub async fn should_mget_values(client: RedisClient, 
_: RedisConfig) -> Result<( Ok(()) } -pub async fn should_msetnx_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let expected: Vec<(&str, RedisValue)> = vec![("a{1}", 1.into()), ("b{1}", 2.into())]; +pub async fn should_msetnx_values(client: Client, _: Config) -> Result<(), Error> { + let expected: Vec<(&str, Value)> = vec![("a{1}", 1.into()), ("b{1}", 2.into())]; // do it first, check they're there let values: i64 = client.msetnx(expected.clone()).await?; @@ -274,8 +279,8 @@ pub async fn should_msetnx_values(client: RedisClient, _: RedisConfig) -> Result assert_eq!(a, 1); assert_eq!(b, 2); - client.del(vec!["a{1}", "b{1}"]).await?; - client.set("a{1}", 3, None, None, false).await?; + let _: () = client.del(vec!["a{1}", "b{1}"]).await?; + let _: () = client.set("a{1}", 3, None, None, false).await?; let values: i64 = client.msetnx(expected.clone()).await?; assert_eq!(values, 0); @@ -285,15 +290,15 @@ pub async fn should_msetnx_values(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_copy_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.set("a{1}", "bar", None, None, false).await?; +pub async fn should_copy_values(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.set("a{1}", "bar", None, None, false).await?; let result: i64 = client.copy("a{1}", "b{1}", None, false).await?; assert_eq!(result, 1); let b: String = client.get("b{1}").await?; assert_eq!(b, "bar"); - client.set("a{1}", "baz", None, None, false).await?; + let _: () = client.set("a{1}", "baz", None, None, false).await?; let result: i64 = client.copy("a{1}", "b{1}", None, false).await?; assert_eq!(result, 0); @@ -305,13 +310,10 @@ pub async fn should_copy_values(client: RedisClient, _: RedisConfig) -> Result<( Ok(()) } -pub async fn should_get_keys_from_pool_in_a_stream( - client: RedisClient, - config: RedisConfig, -) -> Result<(), RedisError> { - client.set("foo", "bar", None, None, false).await?; +pub 
async fn should_get_keys_from_pool_in_a_stream(client: Client, config: Config) -> Result<(), Error> { + let _: () = client.set("foo", "bar", None, None, false).await?; - let pool = RedisPool::new(config, None, None, None, 5)?; + let pool = Pool::new(config, None, None, None, 5)?; pool.connect(); pool.wait_for_connect().await?; @@ -339,16 +341,17 @@ pub async fn should_get_keys_from_pool_in_a_stream( Ok(()) } -pub async fn should_pexpire_key(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_pexpire_key(client: Client, _: Config) -> Result<(), Error> { let _: () = client.set("foo", "bar", None, None, false).await?; - assert_eq!(client.pexpire::("foo", 100, None).await?, 1); + assert_eq!(client.pexpire::("foo", 300, None).await?, 1); + assert_eq!(client.pexpire::("foo", 100, Some(ExpireOptions::GT)).await?, 0); - sleep(Duration::from_millis(150)).await; + sleep(Duration::from_millis(350)).await; assert_eq!(client.get::, _>("foo").await?, None); Ok(()) } -pub async fn should_setnx_value(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_setnx_value(client: Client, _: Config) -> Result<(), Error> { let value_set: i64 = client.setnx("foo", 123456).await?; assert_eq!(value_set, 1); @@ -364,7 +367,7 @@ pub async fn should_setnx_value(client: RedisClient, _: RedisConfig) -> Result<( Ok(()) } -pub async fn should_expire_time_value(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_expire_time_value(client: Client, _: Config) -> Result<(), Error> { let _: () = client.set("foo", "bar", Some(Expiration::EX(60)), None, false).await?; let expiration: i64 = client.expire_time("foo").await?; assert!(expiration > 0); @@ -372,7 +375,7 @@ pub async fn should_expire_time_value(client: RedisClient, _: RedisConfig) -> Re Ok(()) } -pub async fn should_pexpire_time_value(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_pexpire_time_value(client: 
Client, _: Config) -> Result<(), Error> { let _: () = client.set("foo", "bar", Some(Expiration::EX(60)), None, false).await?; let expiration: i64 = client.pexpire_time("foo").await?; assert!(expiration > 0); @@ -381,7 +384,7 @@ pub async fn should_pexpire_time_value(client: RedisClient, _: RedisConfig) -> R } #[cfg(all(feature = "i-keys", feature = "i-hashes", feature = "i-sets"))] -pub async fn should_check_type_of_key(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_check_type_of_key(client: Client, _: Config) -> Result<(), Error> { let _: () = client.set("foo1", "bar", None, None, false).await?; let _: () = client.hset("foo2", ("a", "b")).await?; let _: () = client.sadd("foo3", "c").await?; diff --git a/tests/integration/lists/mod.rs b/tests/integration/lists/mod.rs index 29df249c..4373c92d 100644 --- a/tests/integration/lists/mod.rs +++ b/tests/integration/lists/mod.rs @@ -1,35 +1,38 @@ use fred::{ interfaces::*, prelude::*, - types::{LMoveDirection, ListLocation, SortOrder}, + types::{ + lists::{LMoveDirection, ListLocation}, + SortOrder, + }, }; use std::time::Duration; use tokio::time::sleep; const COUNT: i64 = 10; -async fn create_count_data(client: &RedisClient, key: &str) -> Result, RedisError> { +async fn create_count_data(client: &Client, key: &str) -> Result, Error> { let mut values = Vec::with_capacity(COUNT as usize); for idx in 0 .. COUNT { - client.rpush(key, idx).await?; + let _: () = client.rpush(key, idx).await?; values.push(idx.to_string().into()); } Ok(values) } -pub async fn should_blpop_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_blpop_values(client: Client, _: Config) -> Result<(), Error> { let publisher = client.clone_new(); publisher.connect(); publisher.wait_for_connect().await?; let jh = tokio::spawn(async move { for idx in 0 .. 
COUNT { - let mut result: Vec = client.blpop("foo", 30.0).await?; + let mut result: Vec = client.blpop("foo", 30.0).await?; assert_eq!(result.pop().unwrap().as_i64().unwrap(), idx); } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); for idx in 0 .. COUNT { @@ -44,18 +47,18 @@ pub async fn should_blpop_values(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_brpop_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_brpop_values(client: Client, _: Config) -> Result<(), Error> { let publisher = client.clone_new(); publisher.connect(); publisher.wait_for_connect().await?; let jh = tokio::spawn(async move { for idx in 0 .. COUNT { - let mut result: Vec = client.brpop("foo", 30.0).await?; + let mut result: Vec = client.brpop("foo", 30.0).await?; assert_eq!(result.pop().unwrap().as_i64().unwrap(), idx); } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); for idx in 0 .. COUNT { @@ -70,7 +73,7 @@ pub async fn should_brpop_values(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_brpoplpush_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_brpoplpush_values(client: Client, _: Config) -> Result<(), Error> { let publisher = client.clone_new(); publisher.connect(); publisher.wait_for_connect().await?; @@ -81,7 +84,7 @@ pub async fn should_brpoplpush_values(client: RedisClient, _: RedisConfig) -> Re assert_eq!(result, idx); } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); for idx in 0 .. 
COUNT { @@ -98,7 +101,7 @@ pub async fn should_brpoplpush_values(client: RedisClient, _: RedisConfig) -> Re Ok(()) } -pub async fn should_blmove_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_blmove_values(client: Client, _: Config) -> Result<(), Error> { let publisher = client.clone_new(); publisher.connect(); publisher.wait_for_connect().await?; @@ -111,7 +114,7 @@ pub async fn should_blmove_values(client: RedisClient, _: RedisConfig) -> Result assert_eq!(result, idx); } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); for idx in 0 .. COUNT { @@ -128,37 +131,37 @@ pub async fn should_blmove_values(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_lindex_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_lindex_values(client: Client, _: Config) -> Result<(), Error> { let expected = create_count_data(&client, "foo").await?; for (idx, expected_value) in expected.into_iter().enumerate() { - let result: RedisValue = client.lindex("foo", idx as i64).await?; + let result: Value = client.lindex("foo", idx as i64).await?; assert_eq!(result, expected_value); } Ok(()) } -pub async fn should_linsert_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_linsert_values(client: Client, _: Config) -> Result<(), Error> { let result: usize = client.linsert("foo", ListLocation::Before, 1, 0).await?; assert_eq!(result, 0); let result: usize = client.llen("foo").await?; assert_eq!(result, 0); - client.lpush("foo", 0).await?; - let mut expected: Vec = vec!["0".into()]; + let _: () = client.lpush("foo", 0).await?; + let mut expected: Vec = vec!["0".into()]; for idx in 1 .. 
COUNT { let result: i64 = client.linsert("foo", ListLocation::After, idx - 1, idx).await?; assert_eq!(result, idx + 1); expected.push(idx.to_string().into()); } - let values: Vec = client.lrange("foo", 0, COUNT).await?; + let values: Vec = client.lrange("foo", 0, COUNT).await?; assert_eq!(values, expected); Ok(()) } -pub async fn should_lpop_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_lpop_values(client: Client, _: Config) -> Result<(), Error> { let expected = create_count_data(&client, "foo").await?; for idx in 0 .. COUNT { @@ -167,13 +170,13 @@ pub async fn should_lpop_values(client: RedisClient, _: RedisConfig) -> Result<( } let _ = create_count_data(&client, "foo").await?; - let result: Vec = client.lpop("foo", Some(COUNT as usize)).await?; + let result: Vec = client.lpop("foo", Some(COUNT as usize)).await?; assert_eq!(result, expected); Ok(()) } -pub async fn should_lpos_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_lpos_values(client: Client, _: Config) -> Result<(), Error> { let _ = create_count_data(&client, "foo").await?; for idx in 0 .. COUNT { @@ -202,7 +205,7 @@ pub async fn should_lpos_values(client: RedisClient, _: RedisConfig) -> Result<( Ok(()) } -pub async fn should_lpush_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_lpush_values(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. 
COUNT { let result: i64 = client.lpush("foo", idx).await?; assert_eq!(result, idx + 1); @@ -215,11 +218,11 @@ pub async fn should_lpush_values(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_lpushx_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_lpushx_values(client: Client, _: Config) -> Result<(), Error> { let result: i64 = client.lpushx("foo", 0).await?; assert_eq!(result, 0); - client.lpush("foo", 0).await?; + let _: () = client.lpush("foo", 0).await?; for idx in 0 .. COUNT { let result: i64 = client.lpushx("foo", idx).await?; assert_eq!(result, idx + 2); @@ -232,10 +235,10 @@ pub async fn should_lpushx_values(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_lrange_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_lrange_values(client: Client, _: Config) -> Result<(), Error> { let expected = create_count_data(&client, "foo").await?; - let result: Vec = client.lrange("foo", 0, COUNT).await?; + let result: Vec = client.lrange("foo", 0, COUNT).await?; assert_eq!(result, expected); for idx in 0 .. COUNT { @@ -246,7 +249,7 @@ pub async fn should_lrange_values(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_lrem_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_lrem_values(client: Client, _: Config) -> Result<(), Error> { let _ = create_count_data(&client, "foo").await?; for idx in 0 .. 
COUNT { let result: usize = client.lrem("foo", 1, idx).await?; @@ -267,41 +270,41 @@ pub async fn should_lrem_values(client: RedisClient, _: RedisConfig) -> Result<( Ok(()) } -pub async fn should_lset_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_lset_values(client: Client, _: Config) -> Result<(), Error> { assert!(client.lset::("foo", 1, 0).await.is_err()); let mut expected = create_count_data(&client, "foo").await?; expected.reverse(); for idx in 0 .. COUNT { - client.lset("foo", idx, COUNT - (idx + 1)).await?; + let _: () = client.lset("foo", idx, COUNT - (idx + 1)).await?; } - let result: Vec = client.lrange("foo", 0, COUNT).await?; + let result: Vec = client.lrange("foo", 0, COUNT).await?; assert_eq!(result, expected); Ok(()) } #[cfg(feature = "i-keys")] -pub async fn should_ltrim_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_ltrim_values(client: Client, _: Config) -> Result<(), Error> { let expected = create_count_data(&client, "foo").await?; - client.ltrim("foo", 0, COUNT).await?; - let result: Vec = client.lrange("foo", 0, COUNT).await?; + let _: () = client.ltrim("foo", 0, COUNT).await?; + let result: Vec = client.lrange("foo", 0, COUNT).await?; assert_eq!(result, expected); for idx in 0 .. COUNT { - client.ltrim("foo", 0, idx).await?; - let result: Vec = client.lrange("foo", 0, COUNT).await?; + let _: () = client.ltrim("foo", 0, idx).await?; + let result: Vec = client.lrange("foo", 0, COUNT).await?; assert_eq!(result, expected[0 .. 
(idx + 1) as usize]); - client.del("foo").await?; + let _: () = client.del("foo").await?; let _ = create_count_data(&client, "foo").await?; } Ok(()) } -pub async fn should_rpop_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_rpop_values(client: Client, _: Config) -> Result<(), Error> { let mut expected = create_count_data(&client, "foo").await?; expected.reverse(); @@ -311,13 +314,13 @@ pub async fn should_rpop_values(client: RedisClient, _: RedisConfig) -> Result<( } let _ = create_count_data(&client, "foo").await?; - let result: Vec = client.rpop("foo", Some(COUNT as usize)).await?; + let result: Vec = client.rpop("foo", Some(COUNT as usize)).await?; assert_eq!(result, expected); Ok(()) } -pub async fn should_rpoplpush_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_rpoplpush_values(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. COUNT { let result: i64 = client.lpush("foo{1}", idx).await?; assert_eq!(result, 1); @@ -330,7 +333,7 @@ pub async fn should_rpoplpush_values(client: RedisClient, _: RedisConfig) -> Res Ok(()) } -pub async fn should_lmove_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_lmove_values(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. COUNT { let result: i64 = client.lpush("foo{1}", idx).await?; assert_eq!(result, 1); @@ -345,7 +348,7 @@ pub async fn should_lmove_values(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_rpush_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_rpush_values(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. 
COUNT { let result: i64 = client.rpush("foo", idx).await?; assert_eq!(result, idx + 1); @@ -358,11 +361,11 @@ pub async fn should_rpush_values(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_rpushx_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_rpushx_values(client: Client, _: Config) -> Result<(), Error> { let result: i64 = client.rpushx("foo", 0).await?; assert_eq!(result, 0); - client.rpush("foo", 0).await?; + let _: () = client.rpush("foo", 0).await?; for idx in 0 .. COUNT { let result: i64 = client.rpushx("foo", idx).await?; assert_eq!(result, idx + 2); @@ -375,16 +378,16 @@ pub async fn should_rpushx_values(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_sort_int_list(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.lpush("foo", vec![1, 2, 3, 4, 5]).await?; +pub async fn should_sort_int_list(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.lpush("foo", vec![1, 2, 3, 4, 5]).await?; let sorted: Vec = client.sort("foo", None, None, (), None, false, None).await?; assert_eq!(sorted, vec![1, 2, 3, 4, 5]); Ok(()) } -pub async fn should_sort_alpha_list(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.lpush("foo", vec!["a", "b", "c", "d", "e"]).await?; +pub async fn should_sort_alpha_list(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.lpush("foo", vec!["a", "b", "c", "d", "e"]).await?; let sorted: Vec = client .sort("foo", None, None, (), Some(SortOrder::Desc), true, None) @@ -393,8 +396,8 @@ pub async fn should_sort_alpha_list(client: RedisClient, _: RedisConfig) -> Resu Ok(()) } -pub async fn should_sort_int_list_with_limit(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.lpush("foo", vec![1, 2, 3, 4, 5]).await?; +pub async fn should_sort_int_list_with_limit(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.lpush("foo", 
vec![1, 2, 3, 4, 5]).await?; let sorted: Vec = client.sort("foo", None, Some((2, 2)), (), None, false, None).await?; assert_eq!(sorted, vec![3, 4]); @@ -402,14 +405,14 @@ pub async fn should_sort_int_list_with_limit(client: RedisClient, _: RedisConfig } #[cfg(feature = "i-keys")] -pub async fn should_sort_int_list_with_patterns(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_sort_int_list_with_patterns(client: Client, _: Config) -> Result<(), Error> { let vals: Vec = (1 .. 6).collect(); - let key: RedisKey = "foo".into(); + let key: Key = "foo".into(); - client.lpush(&key, vals.clone()).await?; + let _: () = client.lpush(&key, vals.clone()).await?; for val in vals.iter() { // reverse the weights - client + let _: () = client .set( format!("{}_weight_{}", key.as_str().unwrap(), val), 7 - *val, @@ -420,7 +423,7 @@ pub async fn should_sort_int_list_with_patterns(client: RedisClient, _: RedisCon .await?; } for val in vals.iter() { - client + let _: () = client .set( format!("{}_val_{}", key.as_str().unwrap(), val), *val * 2, @@ -448,8 +451,8 @@ pub async fn should_sort_int_list_with_patterns(client: RedisClient, _: RedisCon } #[cfg(feature = "replicas")] -pub async fn should_sort_ro_int_list(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.lpush("foo", vec![1, 2, 3, 4, 5]).await?; +pub async fn should_sort_ro_int_list(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.lpush("foo", vec![1, 2, 3, 4, 5]).await?; // wait for replicas to recv the command tokio::time::sleep(Duration::from_millis(500)).await; diff --git a/tests/integration/lua/mod.rs b/tests/integration/lua/mod.rs index 5af4033c..6615f696 100644 --- a/tests/integration/lua/mod.rs +++ b/tests/integration/lua/mod.rs @@ -1,7 +1,7 @@ use bytes::Bytes; use fred::{ prelude::*, - types::{FnPolicy, Function, Library, Script}, + types::scripts::{FnPolicy, Function, Library, Script}, util, }; use std::{ @@ -14,7 +14,7 @@ static 
ECHO_SCRIPT: &str = "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}"; static GET_SCRIPT: &str = "return redis.call('get', KEYS[1])"; #[cfg(feature = "sha-1")] -pub async fn load_script(client: &RedisClient, script: &str) -> Result { +pub async fn load_script(client: &Client, script: &str) -> Result { if client.is_clustered() { client.script_load_cluster(script).await } else { @@ -22,7 +22,7 @@ pub async fn load_script(client: &RedisClient, script: &str) -> Result Result<(), RedisError> { +pub async fn flush_scripts(client: &Client) -> Result<(), Error> { if client.is_clustered() { client.script_flush_cluster(false).await } else { @@ -31,7 +31,7 @@ pub async fn flush_scripts(client: &RedisClient) -> Result<(), RedisError> { } #[cfg(feature = "sha-1")] -pub async fn should_load_script(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_load_script(client: Client, _: Config) -> Result<(), Error> { let script_hash = util::sha1_hash(ECHO_SCRIPT); let hash: String = client.script_load(ECHO_SCRIPT).await?; assert_eq!(hash, script_hash); @@ -40,7 +40,7 @@ pub async fn should_load_script(client: RedisClient, _: RedisConfig) -> Result<( } #[cfg(feature = "sha-1")] -pub async fn should_load_script_cluster(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_load_script_cluster(client: Client, _: Config) -> Result<(), Error> { let script_hash = util::sha1_hash(ECHO_SCRIPT); let hash: String = client.script_load_cluster(ECHO_SCRIPT).await?; assert_eq!(hash, script_hash); @@ -49,7 +49,7 @@ pub async fn should_load_script_cluster(client: RedisClient, _: RedisConfig) -> } #[cfg(feature = "sha-1")] -pub async fn should_evalsha_echo_script(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_evalsha_echo_script(client: Client, _: Config) -> Result<(), Error> { let hash = load_script(&client, ECHO_SCRIPT).await?; let result: Vec = client.evalsha(hash, vec!["a{1}", "b{1}"], vec!["c{1}", 
"d{1}"]).await?; @@ -60,7 +60,7 @@ pub async fn should_evalsha_echo_script(client: RedisClient, _: RedisConfig) -> } #[cfg(feature = "sha-1")] -pub async fn should_evalsha_with_reload_echo_script(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_evalsha_with_reload_echo_script(client: Client, _: Config) -> Result<(), Error> { let script = Script::from_lua(ECHO_SCRIPT); let result: Vec = script @@ -73,7 +73,7 @@ pub async fn should_evalsha_with_reload_echo_script(client: RedisClient, _: Redi } #[cfg(feature = "sha-1")] -pub async fn should_evalsha_get_script(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_evalsha_get_script(client: Client, _: Config) -> Result<(), Error> { let script_hash = util::sha1_hash(GET_SCRIPT); let hash = load_script(&client, GET_SCRIPT).await?; assert_eq!(hash, script_hash); @@ -81,7 +81,7 @@ pub async fn should_evalsha_get_script(client: RedisClient, _: RedisConfig) -> R let result: Option = client.evalsha(&script_hash, vec!["foo"], ()).await?; assert!(result.is_none()); - client.set("foo", "bar", None, None, false).await?; + let _: () = client.set("foo", "bar", None, None, false).await?; let result: String = client.evalsha(&script_hash, vec!["foo"], ()).await?; assert_eq!(result, "bar"); @@ -89,7 +89,7 @@ pub async fn should_evalsha_get_script(client: RedisClient, _: RedisConfig) -> R Ok(()) } -pub async fn should_eval_echo_script(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_eval_echo_script(client: Client, _: Config) -> Result<(), Error> { let result: Vec = client .eval(ECHO_SCRIPT, vec!["a{1}", "b{1}"], vec!["c{1}", "d{1}"]) .await?; @@ -100,7 +100,7 @@ pub async fn should_eval_echo_script(client: RedisClient, _: RedisConfig) -> Res } #[cfg(feature = "sha-1")] -pub async fn should_eval_get_script(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_eval_get_script(client: Client, _: Config) 
-> Result<(), Error> { let result: Option = client.eval(GET_SCRIPT, vec!["foo"], ()).await?; assert!(result.is_none()); @@ -108,7 +108,7 @@ pub async fn should_eval_get_script(client: RedisClient, _: RedisConfig) -> Resu let result: Option = client.evalsha(&hash, vec!["foo"], ()).await?; assert!(result.is_none()); - client.set("foo", "bar", None, None, false).await?; + let _: () = client.set("foo", "bar", None, None, false).await?; let result: String = client.eval(GET_SCRIPT, vec!["foo"], ()).await?; assert_eq!(result, "bar"); @@ -119,7 +119,7 @@ pub async fn should_eval_get_script(client: RedisClient, _: RedisConfig) -> Resu Ok(()) } -pub async fn should_function_load_scripts(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_function_load_scripts(client: Client, _: Config) -> Result<(), Error> { check_redis_7!(client); let echo_fn = include_str!("../../scripts/lua/echo.lua"); @@ -129,84 +129,84 @@ pub async fn should_function_load_scripts(client: RedisClient, _: RedisConfig) - assert_eq!(echo, "echolib"); let getset: String = client.function_load(true, getset_fn).await?; assert_eq!(getset, "getsetlib"); - client.function_load_cluster(true, echo_fn).await?; + let _: () = client.function_load_cluster(true, echo_fn).await?; Ok(()) } -pub async fn should_function_dump_and_restore(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_function_dump_and_restore(client: Client, _: Config) -> Result<(), Error> { check_redis_7!(client); let echo_fn = include_str!("../../scripts/lua/echo.lua"); - client.function_load_cluster(true, echo_fn).await?; + let _: () = client.function_load_cluster(true, echo_fn).await?; let fns: Bytes = client.function_dump().await?; - client.function_flush_cluster(false).await?; - client.function_restore_cluster(fns, FnPolicy::default()).await?; + let _: () = client.function_flush_cluster(false).await?; + let _: () = client.function_restore_cluster(fns, FnPolicy::default()).await?; 
- let mut fns: Vec> = client.function_list(Some("echolib"), false).await?; + let mut fns: Vec> = client.function_list(Some("echolib"), false).await?; assert_eq!(fns.len(), 1); let fns = fns.pop().expect("Failed to pop function"); - assert_eq!(fns.get("library_name"), Some(&RedisValue::String("echolib".into()))); + assert_eq!(fns.get("library_name"), Some(&Value::String("echolib".into()))); Ok(()) } -pub async fn should_function_flush(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_function_flush(client: Client, _: Config) -> Result<(), Error> { check_redis_7!(client); let echo_fn = include_str!("../../scripts/lua/echo.lua"); - client.function_load_cluster(true, echo_fn).await?; - let fns: RedisValue = client.function_list(Some("echolib"), false).await?; + let _: () = client.function_load_cluster(true, echo_fn).await?; + let fns: Value = client.function_list(Some("echolib"), false).await?; assert!(!fns.is_null()); - client.function_flush_cluster(false).await?; - let fns: RedisValue = client.function_list(Some("echolib"), false).await?; + let _: () = client.function_flush_cluster(false).await?; + let fns: Value = client.function_list(Some("echolib"), false).await?; assert!(fns.is_null() || fns.array_len() == Some(0)); Ok(()) } -pub async fn should_function_delete(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_function_delete(client: Client, _: Config) -> Result<(), Error> { check_redis_7!(client); let echo_fn = include_str!("../../scripts/lua/echo.lua"); - client.function_load_cluster(true, echo_fn).await?; - let fns: RedisValue = client.function_list(Some("echolib"), false).await?; + let _: () = client.function_load_cluster(true, echo_fn).await?; + let fns: Value = client.function_list(Some("echolib"), false).await?; assert!(!fns.is_null()); - client.function_delete_cluster("echolib").await?; - let fns: RedisValue = client.function_list(Some("echolib"), false).await?; + let _: () = 
client.function_delete_cluster("echolib").await?; + let fns: Value = client.function_list(Some("echolib"), false).await?; assert!(fns.is_null() || fns.array_len() == Some(0)); Ok(()) } -pub async fn should_function_list(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_function_list(client: Client, _: Config) -> Result<(), Error> { check_redis_7!(client); let echo_fn = include_str!("../../scripts/lua/echo.lua"); - client.function_load_cluster(true, echo_fn).await?; + let _: () = client.function_load_cluster(true, echo_fn).await?; let getset_fn = include_str!("../../scripts/lua/getset.lua"); - client.function_load_cluster(true, getset_fn).await?; + let _: () = client.function_load_cluster(true, getset_fn).await?; - let mut fns: Vec> = client.function_list(Some("echolib"), false).await?; + let mut fns: Vec> = client.function_list(Some("echolib"), false).await?; assert_eq!(fns.len(), 1); let fns = fns.pop().expect("Failed to pop function"); - assert_eq!(fns.get("library_name"), Some(&RedisValue::String("echolib".into()))); + assert_eq!(fns.get("library_name"), Some(&Value::String("echolib".into()))); Ok(()) } -pub async fn should_function_list_multiple(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_function_list_multiple(client: Client, _: Config) -> Result<(), Error> { check_redis_7!(client); let echo_fn = include_str!("../../scripts/lua/echo.lua"); - client.function_load_cluster(true, echo_fn).await?; + let _: () = client.function_load_cluster(true, echo_fn).await?; let getset_fn = include_str!("../../scripts/lua/getset.lua"); - client.function_load_cluster(true, getset_fn).await?; + let _: () = client.function_load_cluster(true, getset_fn).await?; - let fns: Vec> = client.function_list(None::, false).await?; + let fns: Vec> = client.function_list(None::, false).await?; // ordering is not deterministic, so convert to a set of library names let fns: BTreeSet = fns @@ -228,13 +228,13 @@ pub 
async fn should_function_list_multiple(client: RedisClient, _: RedisConfig) } #[cfg(feature = "i-keys")] -pub async fn should_function_fcall_getset(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_function_fcall_getset(client: Client, _: Config) -> Result<(), Error> { check_redis_7!(client); let getset_fn = include_str!("../../scripts/lua/getset.lua"); - client.function_load_cluster(true, getset_fn).await?; + let _: () = client.function_load_cluster(true, getset_fn).await?; - client.set("foo{1}", "bar", None, None, false).await?; + let _: () = client.set("foo{1}", "bar", None, None, false).await?; let old: String = client.fcall("getset", vec!["foo{1}"], vec!["baz"]).await?; assert_eq!(old, "bar"); let new: String = client.get("foo{1}").await?; @@ -243,11 +243,11 @@ pub async fn should_function_fcall_getset(client: RedisClient, _: RedisConfig) - Ok(()) } -pub async fn should_function_fcall_echo(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_function_fcall_echo(client: Client, _: Config) -> Result<(), Error> { check_redis_7!(client); let echo_fn = include_str!("../../scripts/lua/echo.lua"); - client.function_load_cluster(true, echo_fn).await?; + let _: () = client.function_load_cluster(true, echo_fn).await?; let result: Vec = client .fcall("echo", vec!["key1{1}", "key2{1}"], vec!["arg1", "arg2"]) @@ -257,11 +257,11 @@ pub async fn should_function_fcall_echo(client: RedisClient, _: RedisConfig) -> Ok(()) } -pub async fn should_function_fcall_ro_echo(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_function_fcall_ro_echo(client: Client, _: Config) -> Result<(), Error> { check_redis_7!(client); let echo_fn = include_str!("../../scripts/lua/echo.lua"); - client.function_load_cluster(true, echo_fn).await?; + let _: () = client.function_load_cluster(true, echo_fn).await?; let result: Vec = client .fcall_ro("echo", vec!["key1{1}", "key2{1}"], vec!["arg1", 
"arg2"]) @@ -272,14 +272,11 @@ pub async fn should_function_fcall_ro_echo(client: RedisClient, _: RedisConfig) } #[cfg(feature = "sha-1")] -pub async fn should_create_lua_script_helper_from_code( - client: RedisClient, - _: RedisConfig, -) -> Result<(), RedisError> { +pub async fn should_create_lua_script_helper_from_code(client: Client, _: Config) -> Result<(), Error> { let script = Script::from_lua(ECHO_SCRIPT); script.load(&client).await?; - let result: Vec = script + let result: Vec = script .evalsha(&client, vec!["foo{1}", "bar{1}"], vec!["3", "4"]) .await?; assert_eq!(result, vec!["foo{1}".into(), "bar{1}".into(), "3".into(), "4".into()]); @@ -287,21 +284,18 @@ pub async fn should_create_lua_script_helper_from_code( } #[cfg(feature = "sha-1")] -pub async fn should_create_lua_script_helper_from_hash( - client: RedisClient, - _: RedisConfig, -) -> Result<(), RedisError> { +pub async fn should_create_lua_script_helper_from_hash(client: Client, _: Config) -> Result<(), Error> { let hash: String = client.script_load_cluster(ECHO_SCRIPT).await?; let script = Script::from_hash(hash); - let result: Vec = script + let result: Vec = script .evalsha(&client, vec!["foo{1}", "bar{1}"], vec!["3", "4"]) .await?; assert_eq!(result, vec!["foo{1}".into(), "bar{1}".into(), "3".into(), "4".into()]); Ok(()) } -pub async fn should_create_function_from_code(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_create_function_from_code(client: Client, _: Config) -> Result<(), Error> { check_redis_7!(client); let echo_lib = include_str!("../../scripts/lua/echo.lua"); @@ -309,20 +303,20 @@ pub async fn should_create_function_from_code(client: RedisClient, _: RedisConfi assert_eq!(lib.name().deref(), "echolib"); let func = lib.functions().get("echo").expect("Failed to read echo function"); - let result: Vec = func.fcall(&client, vec!["foo{1}", "bar{1}"], vec!["3", "4"]).await?; + let result: Vec = func.fcall(&client, vec!["foo{1}", "bar{1}"], vec!["3", 
"4"]).await?; assert_eq!(result, vec!["foo{1}".into(), "bar{1}".into(), "3".into(), "4".into()]); Ok(()) } -pub async fn should_create_function_from_name(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_create_function_from_name(client: Client, _: Config) -> Result<(), Error> { check_redis_7!(client); let echo_lib = include_str!("../../scripts/lua/echo.lua"); - client.function_load_cluster(true, echo_lib).await?; + let _: () = client.function_load_cluster(true, echo_lib).await?; let lib = Library::from_name(&client, "echolib").await?; let func = lib.functions().get("echo").expect("Failed to read echo function"); - let result: Vec = func.fcall(&client, vec!["foo{1}", "bar{1}"], vec!["3", "4"]).await?; + let result: Vec = func.fcall(&client, vec!["foo{1}", "bar{1}"], vec!["3", "4"]).await?; assert_eq!(result, vec!["foo{1}".into(), "bar{1}".into(), "3".into(), "4".into()]); Ok(()) } diff --git a/tests/integration/memory/mod.rs b/tests/integration/memory/mod.rs index 071491ef..10ff0fe2 100644 --- a/tests/integration/memory/mod.rs +++ b/tests/integration/memory/mod.rs @@ -1,29 +1,29 @@ use fred::{cmd, prelude::*, types::MemoryStats}; -pub async fn should_run_memory_doctor(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.memory_doctor().await?; +pub async fn should_run_memory_doctor(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.memory_doctor().await?; Ok(()) } -pub async fn should_run_memory_malloc_stats(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.memory_malloc_stats().await?; +pub async fn should_run_memory_malloc_stats(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.memory_malloc_stats().await?; Ok(()) } -pub async fn should_run_memory_purge(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.memory_purge().await?; +pub async fn should_run_memory_purge(client: Client, _: Config) -> Result<(), Error> { + let _: 
() = client.memory_purge().await?; Ok(()) } -pub async fn should_run_memory_stats(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_run_memory_stats(client: Client, _: Config) -> Result<(), Error> { let stats: MemoryStats = client.memory_stats().await?; assert!(stats.total_allocated > 0); Ok(()) } -pub async fn should_run_memory_usage(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.custom(cmd!("SET"), vec!["foo", "bar"]).await?; +pub async fn should_run_memory_usage(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.custom(cmd!("SET"), vec!["foo", "bar"]).await?; assert!(client.memory_usage::("foo", None).await? > 0); Ok(()) diff --git a/tests/integration/multi/mod.rs b/tests/integration/multi/mod.rs index 5358eb32..524dece0 100644 --- a/tests/integration/multi/mod.rs +++ b/tests/integration/multi/mod.rs @@ -1,36 +1,36 @@ use fred::{ - clients::RedisClient, - error::RedisError, + clients::Client, + error::Error, interfaces::*, - types::{RedisConfig, RedisValue}, + types::{config::Config, Value}, }; -pub async fn should_run_get_set_trx(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_run_get_set_trx(client: Client, _config: Config) -> Result<(), Error> { let trx = client.multi(); - trx.set("foo", "bar", None, None, false).await?; - trx.get("foo").await?; + let _: () = trx.set("foo", "bar", None, None, false).await?; + let _: () = trx.get("foo").await?; let results: Vec = trx.exec(true).await?; assert_eq!(results, vec!["OK", "bar"]); Ok(()) } -pub async fn should_run_error_get_set_trx(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - client.set("foo", "bar", None, None, false).await?; +pub async fn should_run_error_get_set_trx(client: Client, _config: Config) -> Result<(), Error> { + let _: () = client.set("foo", "bar", None, None, false).await?; let trx = client.multi(); - trx.incr("foo").await?; - trx.exec(true).await?; 
+ let _: () = trx.incr("foo").await?; + let _: Vec = trx.exec(true).await?; Ok(()) } -pub async fn should_fail_with_hashslot_error(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_fail_with_hashslot_error(client: Client, _config: Config) -> Result<(), Error> { let trx = client.multi(); - trx.set("foo", "bar", None, None, false).await?; - trx.set("bar", "baz", None, None, false).await?; - trx.exec(true).await?; + let _: () = trx.set("foo", "bar", None, None, false).await?; + let _: () = trx.set("bar", "baz", None, None, false).await?; + let _: Vec = trx.exec(true).await?; Ok(()) } diff --git a/tests/integration/other/mod.rs b/tests/integration/other/mod.rs index 9137edab..d862699c 100644 --- a/tests/integration/other/mod.rs +++ b/tests/integration/other/mod.rs @@ -1,23 +1,17 @@ use super::utils; use async_trait::async_trait; use fred::{ - clients::{RedisClient, RedisPool}, + clients::{Client, Pool}, cmd, - error::{RedisError, RedisErrorKind}, + error::{Error, ErrorKind}, interfaces::*, - prelude::{Blocking, RedisValue}, + prelude::{Blocking, Server, Value}, types::{ - BackpressureConfig, + config::{ClusterDiscoveryPolicy, Config, Options, PerformanceConfig, ServerConfig}, Builder, - ClientUnblockFlag, - ClusterDiscoveryPolicy, ClusterHash, - Options, - PerformanceConfig, - RedisConfig, - RedisKey, - RedisMap, - ServerConfig, + Key, + Map, }, }; use futures::future::try_join; @@ -37,22 +31,23 @@ use tokio::time::sleep; #[cfg(feature = "subscriber-client")] use fred::clients::SubscriberClient; -use fred::prelude::Server; #[cfg(feature = "credential-provider")] -use fred::types::CredentialProvider; +use fred::types::config::CredentialProvider; #[cfg(feature = "replicas")] -use fred::types::ReplicaConfig; +use fred::types::config::ReplicaConfig; +#[cfg(feature = "partial-tracing")] +use fred::types::config::TracingConfig; +#[cfg(feature = "i-client")] +use fred::types::ClientUnblockFlag; #[cfg(feature = "dns")] use 
fred::types::Resolve; -#[cfg(feature = "partial-tracing")] -use fred::types::TracingConfig; #[cfg(feature = "dns")] use hickory_resolver::{config::*, TokioAsyncResolver}; #[cfg(feature = "dns")] use std::net::{IpAddr, SocketAddr}; #[cfg(all(feature = "i-keys", feature = "i-hashes"))] -fn hash_to_btree(vals: &RedisMap) -> BTreeMap { +fn hash_to_btree(vals: &Map) -> BTreeMap { vals .iter() .map(|(key, value)| (key.clone(), value.as_u64().unwrap() as u16)) @@ -70,11 +65,11 @@ pub fn incr_atomic(size: &Arc) -> usize { } #[cfg(all(feature = "i-keys", feature = "i-hashes"))] -pub async fn should_smoke_test_from_redis_impl(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let nested_values: RedisMap = vec![("a", 1), ("b", 2)].try_into()?; - client.set("foo", "123", None, None, false).await?; - client.set("baz", "456", None, None, false).await?; - client.hset("bar", &nested_values).await?; +pub async fn should_smoke_test_from_value_impl(client: Client, _: Config) -> Result<(), Error> { + let nested_values: Map = vec![("a", 1), ("b", 2)].try_into()?; + let _: () = client.set("foo", "123", None, None, false).await?; + let _: () = client.set("baz", "456", None, None, false).await?; + let _: () = client.hset("bar", &nested_values).await?; let foo: usize = client.get("foo").await?; assert_eq!(foo, 123); @@ -89,8 +84,8 @@ pub async fn should_smoke_test_from_redis_impl(client: RedisClient, _: RedisConf let foo: BTreeSet = client.hvals("bar").await?; assert_eq!(foo, array_to_set(vec!["1".to_owned(), "2".to_owned()])); let foo: HashMap = client.hgetall("bar").await?; - assert_eq!(foo, RedisValue::Map(nested_values.clone()).convert()?); - let foo: BTreeMap = client.hgetall("bar").await?; + assert_eq!(foo, Value::Map(nested_values.clone()).convert()?); + let foo: BTreeMap = client.hgetall("bar").await?; assert_eq!(foo, hash_to_btree(&nested_values)); let foo: (String, i64) = client.mget(vec!["foo", "baz"]).await?; assert_eq!(foo, ("123".into(), 456)); @@ -101,27 
+96,27 @@ pub async fn should_smoke_test_from_redis_impl(client: RedisClient, _: RedisConf } #[cfg(all(feature = "i-client", feature = "i-lists"))] -pub async fn should_automatically_unblock(_: RedisClient, mut config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_automatically_unblock(_: Client, mut config: Config) -> Result<(), Error> { config.blocking = Blocking::Interrupt; - let client = RedisClient::new(config, None, None, None); + let client = Client::new(config, None, None, None); client.connect(); client.wait_for_connect().await?; let unblock_client = client.clone(); tokio::spawn(async move { sleep(Duration::from_secs(1)).await; - let _: () = unblock_client.ping().await.expect("Failed to ping"); + let _: () = unblock_client.ping(None).await.expect("Failed to ping"); }); let result = client.blpop::<(), _>("foo", 60.0).await; assert!(result.is_err()); - assert_ne!(*result.unwrap_err().kind(), RedisErrorKind::Timeout); + assert_ne!(*result.unwrap_err().kind(), ErrorKind::Timeout); Ok(()) } #[cfg(all(feature = "i-client", feature = "i-lists"))] -pub async fn should_manually_unblock(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let connections_ids = client.connection_ids().await; +pub async fn should_manually_unblock(client: Client, _: Config) -> Result<(), Error> { + let connections_ids = client.connection_ids(); let unblock_client = client.clone(); tokio::spawn(async move { @@ -136,14 +131,14 @@ pub async fn should_manually_unblock(client: RedisClient, _: RedisConfig) -> Res let result = client.blpop::<(), _>("foo", 60.0).await; assert!(result.is_err()); - assert_ne!(*result.unwrap_err().kind(), RedisErrorKind::Timeout); + assert_ne!(*result.unwrap_err().kind(), ErrorKind::Timeout); Ok(()) } #[cfg(all(feature = "i-client", feature = "i-lists"))] -pub async fn should_error_when_blocked(_: RedisClient, mut config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_error_when_blocked(_: Client, mut config: Config) 
-> Result<(), Error> { config.blocking = Blocking::Error; - let client = RedisClient::new(config, None, None, None); + let client = Client::new(config, None, None, None); client.connect(); client.wait_for_connect().await?; let error_client = client.clone(); @@ -151,9 +146,9 @@ pub async fn should_error_when_blocked(_: RedisClient, mut config: RedisConfig) tokio::spawn(async move { sleep(Duration::from_secs(1)).await; - let result = error_client.ping::<()>().await; + let result = error_client.ping::<()>(None).await; assert!(result.is_err()); - assert_eq!(*result.unwrap_err().kind(), RedisErrorKind::InvalidCommand); + assert_eq!(*result.unwrap_err().kind(), ErrorKind::InvalidCommand); let _ = error_client.unblock_self(None).await; }); @@ -163,7 +158,7 @@ pub async fn should_error_when_blocked(_: RedisClient, mut config: RedisConfig) Ok(()) } -pub async fn should_split_clustered_connection(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_split_clustered_connection(client: Client, _config: Config) -> Result<(), Error> { let actual = client .split_cluster()? 
.iter() @@ -183,11 +178,11 @@ pub async fn should_split_clustered_connection(client: RedisClient, _config: Red } #[cfg(feature = "metrics")] -pub async fn should_track_size_stats(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_track_size_stats(client: Client, _config: Config) -> Result<(), Error> { let _ = client.take_res_size_metrics(); let _ = client.take_req_size_metrics(); - let _ = client + let _: () = client .set("foo", "abcdefghijklmnopqrstuvxyz", None, None, false) .await?; let req_stats = client.take_req_size_metrics(); @@ -206,11 +201,11 @@ pub async fn should_track_size_stats(client: RedisClient, _config: RedisConfig) } #[cfg(feature = "i-server")] -pub async fn should_run_flushall_cluster(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_run_flushall_cluster(client: Client, _: Config) -> Result<(), Error> { let count: i64 = 200; for idx in 0 .. count { - client + let _: () = client .custom(cmd!("SET"), vec![format!("foo-{}", idx), idx.to_string()]) .await?; } @@ -224,10 +219,7 @@ pub async fn should_run_flushall_cluster(client: RedisClient, _: RedisConfig) -> Ok(()) } -pub async fn should_safely_change_protocols_repeatedly( - client: RedisClient, - _: RedisConfig, -) -> Result<(), RedisError> { +pub async fn should_safely_change_protocols_repeatedly(client: Client, _: Config) -> Result<(), Error> { let done = Arc::new(RwLock::new(false)); let other = client.clone(); let other_done = done.clone(); @@ -235,9 +227,9 @@ pub async fn should_safely_change_protocols_repeatedly( let jh = tokio::spawn(async move { loop { if *other_done.read() { - return Ok::<_, RedisError>(()); + return Ok::<_, Error>(()); } - other.ping().await?; + let _: () = other.ping(None).await?; sleep(Duration::from_millis(10)).await; } }); @@ -260,17 +252,10 @@ pub async fn should_safely_change_protocols_repeatedly( // test to repro an intermittent race condition found while stress testing the client 
#[allow(dead_code)] #[cfg(feature = "i-keys")] -pub async fn should_test_high_concurrency_pool(_: RedisClient, mut config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_test_high_concurrency_pool(_: Client, mut config: Config) -> Result<(), Error> { config.blocking = Blocking::Block; - let perf = PerformanceConfig { - auto_pipeline: true, - backpressure: BackpressureConfig { - max_in_flight_commands: 100_000_000, - ..Default::default() - }, - ..Default::default() - }; - let pool = RedisPool::new(config, Some(perf), None, None, 28)?; + let perf = PerformanceConfig::default(); + let pool = Pool::new(config, Some(perf), None, None, 28)?; pool.connect(); pool.wait_for_connect().await?; @@ -290,15 +275,15 @@ pub async fn should_test_high_concurrency_pool(_: RedisClient, mut config: Redis let actual: i64 = client.incr(&key).await?; expected += 1; if actual != expected { - return Err(RedisError::new( - RedisErrorKind::Unknown, + return Err(Error::new( + ErrorKind::Unknown, format!("Expected {}, found {}", expected, actual), )); } } // println!("Task {} finished.", idx); - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) })); } let _ = futures::future::try_join_all(tasks).await?; @@ -307,14 +292,14 @@ pub async fn should_test_high_concurrency_pool(_: RedisClient, mut config: Redis } #[cfg(feature = "i-keys")] -pub async fn should_pipeline_all(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_pipeline_all(client: Client, _: Config) -> Result<(), Error> { let pipeline = client.pipeline(); - let result: RedisValue = pipeline.set("foo", 1, None, None, false).await?; + let result: Value = pipeline.set("foo", 1, None, None, false).await?; assert!(result.is_queued()); - let result: RedisValue = pipeline.set("bar", 2, None, None, false).await?; + let result: Value = pipeline.set("bar", 2, None, None, false).await?; assert!(result.is_queued()); - let result: RedisValue = pipeline.incr("foo").await?; + let result: Value = 
pipeline.incr("foo").await?; assert!(result.is_queued()); let result: ((), (), i64) = pipeline.all().await?; @@ -323,19 +308,19 @@ pub async fn should_pipeline_all(client: RedisClient, _: RedisConfig) -> Result< } #[cfg(all(feature = "i-keys", feature = "i-hashes"))] -pub async fn should_pipeline_all_error_early(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_pipeline_all_error_early(client: Client, _: Config) -> Result<(), Error> { let pipeline = client.pipeline(); - let result: RedisValue = pipeline.set("foo", 1, None, None, false).await?; + let result: Value = pipeline.set("foo", 1, None, None, false).await?; assert!(result.is_queued()); - let result: RedisValue = pipeline.hgetall("foo").await?; + let result: Value = pipeline.hgetall("foo").await?; assert!(result.is_queued()); - let result: RedisValue = pipeline.incr("foo").await?; + let result: Value = pipeline.incr("foo").await?; assert!(result.is_queued()); - if let Err(e) = pipeline.all::().await { + if let Err(e) = pipeline.all::().await { // make sure we get the expected error from the server rather than a parsing error - assert_eq!(*e.kind(), RedisErrorKind::InvalidArgument); + assert_eq!(*e.kind(), ErrorKind::InvalidArgument); } else { panic!("Expected pipeline error."); } @@ -344,14 +329,14 @@ pub async fn should_pipeline_all_error_early(client: RedisClient, _: RedisConfig } #[cfg(feature = "i-keys")] -pub async fn should_pipeline_last(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_pipeline_last(client: Client, _: Config) -> Result<(), Error> { let pipeline = client.pipeline(); - let result: RedisValue = pipeline.set("foo", 1, None, None, false).await?; + let result: Value = pipeline.set("foo", 1, None, None, false).await?; assert!(result.is_queued()); - let result: RedisValue = pipeline.set("bar", 2, None, None, false).await?; + let result: Value = pipeline.set("bar", 2, None, None, false).await?; assert!(result.is_queued()); 
- let result: RedisValue = pipeline.incr("foo").await?; + let result: Value = pipeline.incr("foo").await?; assert!(result.is_queued()); let result: i64 = pipeline.last().await?; @@ -360,11 +345,11 @@ pub async fn should_pipeline_last(client: RedisClient, _: RedisConfig) -> Result } #[cfg(all(feature = "i-keys", feature = "i-hashes"))] -pub async fn should_pipeline_try_all(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_pipeline_try_all(client: Client, _: Config) -> Result<(), Error> { let pipeline = client.pipeline(); - pipeline.incr("foo").await?; - pipeline.hgetall("foo").await?; + let _: () = pipeline.incr("foo").await?; + let _: () = pipeline.hgetall("foo").await?; let results = pipeline.try_all::().await; assert_eq!(results[0].clone().unwrap(), 1); @@ -374,21 +359,21 @@ pub async fn should_pipeline_try_all(client: RedisClient, _: RedisConfig) -> Res } #[cfg(feature = "i-server")] -pub async fn should_use_all_cluster_nodes_repeatedly(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_use_all_cluster_nodes_repeatedly(client: Client, _: Config) -> Result<(), Error> { let other = client.clone(); let jh1 = tokio::spawn(async move { for _ in 0 .. 200 { other.flushall_cluster().await?; } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); let jh2 = tokio::spawn(async move { for _ in 0 .. 
200 { client.flushall_cluster().await?; } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); let _ = try_join(jh1, jh2).await?; @@ -396,10 +381,10 @@ pub async fn should_use_all_cluster_nodes_repeatedly(client: RedisClient, _: Red } #[cfg(all(feature = "partial-tracing", feature = "i-keys"))] -pub async fn should_use_tracing_get_set(client: RedisClient, mut config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_use_tracing_get_set(client: Client, mut config: Config) -> Result<(), Error> { config.tracing = TracingConfig::new(true); let (perf, policy) = (client.perf_config(), client.client_reconnect_policy()); - let client = RedisClient::new(config, Some(perf), None, policy); + let client = Client::new(config, Some(perf), None, policy); let _ = client.connect(); let _ = client.wait_for_connect().await?; @@ -460,25 +445,25 @@ pub async fn should_use_tracing_get_set(client: RedisClient, mut config: RedisCo // } #[cfg(feature = "subscriber-client")] -pub async fn should_ping_with_subscriber_client(client: RedisClient, config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_ping_with_subscriber_client(client: Client, config: Config) -> Result<(), Error> { let (perf, policy) = (client.perf_config(), client.client_reconnect_policy()); let client = SubscriberClient::new(config, Some(perf), None, policy); let _ = client.connect(); let _ = client.wait_for_connect().await?; - let _: () = client.ping().await?; + let _: () = client.ping(None).await?; let _: () = client.subscribe("foo").await?; - let _: () = client.ping().await?; + let _: () = client.ping(None).await?; let _ = client.quit().await?; Ok(()) } #[cfg(all(feature = "replicas", feature = "i-keys"))] -pub async fn should_replica_set_and_get(client: RedisClient, config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_replica_set_and_get(client: Client, config: Config) -> Result<(), Error> { let policy = client.client_reconnect_policy(); let mut connection = 
client.connection_config().clone(); connection.replica = ReplicaConfig::default(); - let client = RedisClient::new(config, None, Some(connection), policy); + let client = Client::new(config, None, Some(connection), policy); client.init().await?; let _: () = client.set("foo", "bar", None, None, false).await?; @@ -489,11 +474,11 @@ pub async fn should_replica_set_and_get(client: RedisClient, config: RedisConfig } #[cfg(all(feature = "replicas", feature = "i-keys"))] -pub async fn should_replica_set_and_get_not_lazy(client: RedisClient, config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_replica_set_and_get_not_lazy(client: Client, config: Config) -> Result<(), Error> { let policy = client.client_reconnect_policy(); let mut connection = client.connection_config().clone(); connection.replica.lazy_connections = false; - let client = RedisClient::new(config, None, Some(connection), policy); + let client = Client::new(config, None, Some(connection), policy); client.init().await?; let _: () = client.set("foo", "bar", None, None, false).await?; @@ -504,7 +489,7 @@ pub async fn should_replica_set_and_get_not_lazy(client: RedisClient, config: Re } #[cfg(all(feature = "replicas", feature = "i-keys"))] -pub async fn should_pipeline_with_replicas(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_pipeline_with_replicas(client: Client, _: Config) -> Result<(), Error> { let _: () = client.set("foo", 1, None, None, false).await?; let _: () = client.set("bar", 2, None, None, false).await?; @@ -518,10 +503,7 @@ pub async fn should_pipeline_with_replicas(client: RedisClient, _: RedisConfig) } #[cfg(all(feature = "replicas", feature = "i-keys"))] -pub async fn should_use_cluster_replica_without_redirection( - client: RedisClient, - config: RedisConfig, -) -> Result<(), RedisError> { +pub async fn should_use_cluster_replica_without_redirection(client: Client, config: Config) -> Result<(), Error> { let mut connection = 
client.connection_config().clone(); connection.replica = ReplicaConfig { lazy_connections: true, @@ -532,9 +514,9 @@ pub async fn should_use_cluster_replica_without_redirection( connection.max_redirections = 0; let policy = client.client_reconnect_policy(); - let client = RedisClient::new(config, None, Some(connection), policy); + let client = Client::new(config, None, Some(connection), policy); let _ = client.connect(); - let _ = client.wait_for_connect().await?; + client.wait_for_connect().await?; let _: () = client.replicas().get("foo").await?; let _: () = client.incr("foo").await?; @@ -542,20 +524,20 @@ pub async fn should_use_cluster_replica_without_redirection( Ok(()) } -pub async fn should_gracefully_quit(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_gracefully_quit(client: Client, _: Config) -> Result<(), Error> { let client = client.clone_new(); let connection = client.connect(); client.wait_for_connect().await?; - client.ping().await?; - client.quit().await?; + let _: () = client.ping(None).await?; + let _: () = client.quit().await?; let _ = connection.await; Ok(()) } #[cfg(feature = "i-lists")] -pub async fn should_support_options_with_pipeline(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_support_options_with_pipeline(client: Client, _: Config) -> Result<(), Error> { let options = Options { timeout: Some(Duration::from_millis(100)), max_attempts: Some(42), @@ -564,25 +546,25 @@ pub async fn should_support_options_with_pipeline(client: RedisClient, _: RedisC }; let pipeline = client.pipeline().with_options(&options); - pipeline.blpop("foo", 2.0).await?; - let results = pipeline.try_all::().await; - assert_eq!(results[0].clone().unwrap_err().kind(), &RedisErrorKind::Timeout); + let _: () = pipeline.blpop("foo", 2.0).await?; + let results = pipeline.try_all::().await; + assert_eq!(results[0].clone().unwrap_err().kind(), &ErrorKind::Timeout); Ok(()) } #[cfg(feature = "i-keys")] 
-pub async fn should_reuse_pipeline(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_reuse_pipeline(client: Client, _: Config) -> Result<(), Error> { let pipeline = client.pipeline(); - pipeline.incr("foo").await?; - pipeline.incr("foo").await?; + let _: () = pipeline.incr("foo").await?; + let _: () = pipeline.incr("foo").await?; assert_eq!(pipeline.last::().await?, 2); assert_eq!(pipeline.last::().await?, 4); Ok(()) } #[cfg(all(feature = "transactions", feature = "i-keys"))] -pub async fn should_support_options_with_trx(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_support_options_with_trx(client: Client, _: Config) -> Result<(), Error> { let options = Options { max_attempts: Some(1), timeout: Some(Duration::from_secs(1)), @@ -590,10 +572,10 @@ pub async fn should_support_options_with_trx(client: RedisClient, _: RedisConfig }; let trx = client.multi().with_options(&options); - trx.get("foo{1}").await?; - trx.set("foo{1}", "bar", None, None, false).await?; - trx.get("foo{1}").await?; - let (first, second, third): (Option, bool, String) = trx.exec(true).await?; + let _: () = trx.get("foo{1}").await?; + let _: () = trx.set("foo{1}", "bar", None, None, false).await?; + let _: () = trx.get("foo{1}").await?; + let (first, second, third): (Option, bool, String) = trx.exec(true).await?; assert_eq!(first, None); assert!(second); @@ -602,14 +584,13 @@ pub async fn should_support_options_with_trx(client: RedisClient, _: RedisConfig } #[cfg(all(feature = "transactions", feature = "i-keys"))] -pub async fn should_pipeline_transaction(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.incr("foo{1}").await?; - client.incr("bar{1}").await?; +pub async fn should_pipeline_transaction(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.incr("foo{1}").await?; + let _: () = client.incr("bar{1}").await?; let trx = client.multi(); - trx.pipeline(true); - 
trx.get("foo{1}").await?; - trx.incr("bar{1}").await?; + let _: () = trx.get("foo{1}").await?; + let _: () = trx.incr("bar{1}").await?; let (foo, bar): (i64, i64) = trx.exec(true).await?; assert_eq!((foo, bar), (1, 2)); @@ -617,18 +598,17 @@ pub async fn should_pipeline_transaction(client: RedisClient, _: RedisConfig) -> } #[cfg(all(feature = "transactions", feature = "i-keys", feature = "i-hashes"))] -pub async fn should_fail_pipeline_transaction_error(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.incr("foo{1}").await?; - client.incr("bar{1}").await?; +pub async fn should_fail_pipeline_transaction_error(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.incr("foo{1}").await?; + let _: () = client.incr("bar{1}").await?; let trx = client.multi(); - trx.pipeline(true); - trx.get("foo{1}").await?; - trx.hgetall("bar{1}").await?; - trx.get("foo{1}").await?; + let _: () = trx.get("foo{1}").await?; + let _: () = trx.hgetall("bar{1}").await?; + let _: () = trx.get("foo{1}").await?; - if let Err(e) = trx.exec::(false).await { - assert_eq!(*e.kind(), RedisErrorKind::InvalidArgument); + if let Err(e) = trx.exec::(false).await { + assert_eq!(*e.kind(), ErrorKind::InvalidArgument); } else { panic!("Expected error from transaction."); } @@ -637,7 +617,7 @@ pub async fn should_fail_pipeline_transaction_error(client: RedisClient, _: Redi } #[cfg(all(feature = "i-keys", feature = "i-lists"))] -pub async fn should_manually_connect_twice(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_manually_connect_twice(client: Client, _: Config) -> Result<(), Error> { let client = client.clone_new(); let _old_connection = client.connect(); client.wait_for_connect().await?; @@ -657,23 +637,17 @@ pub async fn should_manually_connect_twice(client: RedisClient, _: RedisConfig) Ok(()) } -pub async fn pool_should_connect_correctly_via_init_interface( - _: RedisClient, - config: RedisConfig, -) -> Result<(), 
RedisError> { +pub async fn pool_should_connect_correctly_via_init_interface(_: Client, config: Config) -> Result<(), Error> { let pool = Builder::from_config(config).build_pool(5)?; let task = pool.init().await?; - pool.ping().await?; - pool.quit().await?; + let _: () = pool.ping(None).await?; + let _: () = pool.quit().await?; task.await??; Ok(()) } -pub async fn pool_should_fail_with_bad_host_via_init_interface( - _: RedisClient, - mut config: RedisConfig, -) -> Result<(), RedisError> { +pub async fn pool_should_fail_with_bad_host_via_init_interface(_: Client, mut config: Config) -> Result<(), Error> { config.fail_fast = true; config.server = ServerConfig::new_centralized("incorrecthost", 1234); let pool = Builder::from_config(config).build_pool(5)?; @@ -681,24 +655,18 @@ pub async fn pool_should_fail_with_bad_host_via_init_interface( Ok(()) } -pub async fn pool_should_connect_correctly_via_wait_interface( - _: RedisClient, - config: RedisConfig, -) -> Result<(), RedisError> { +pub async fn pool_should_connect_correctly_via_wait_interface(_: Client, config: Config) -> Result<(), Error> { let pool = Builder::from_config(config).build_pool(5)?; let task = pool.connect(); pool.wait_for_connect().await?; - pool.ping().await?; - pool.quit().await?; + let _: () = pool.ping(None).await?; + let _: () = pool.quit().await?; task.await??; Ok(()) } -pub async fn pool_should_fail_with_bad_host_via_wait_interface( - _: RedisClient, - mut config: RedisConfig, -) -> Result<(), RedisError> { +pub async fn pool_should_fail_with_bad_host_via_wait_interface(_: Client, mut config: Config) -> Result<(), Error> { config.fail_fast = true; config.server = ServerConfig::new_centralized("incorrecthost", 1234); let pool = Builder::from_config(config).build_pool(5)?; @@ -709,23 +677,17 @@ pub async fn pool_should_fail_with_bad_host_via_wait_interface( Ok(()) } -pub async fn should_connect_correctly_via_init_interface( - _: RedisClient, - config: RedisConfig, -) -> Result<(), RedisError> { 
+pub async fn should_connect_correctly_via_init_interface(_: Client, config: Config) -> Result<(), Error> { let client = Builder::from_config(config).build()?; let task = client.init().await?; - client.ping().await?; - client.quit().await?; + let _: () = client.ping(None).await?; + let _: () = client.quit().await?; task.await??; Ok(()) } -pub async fn should_fail_with_bad_host_via_init_interface( - _: RedisClient, - mut config: RedisConfig, -) -> Result<(), RedisError> { +pub async fn should_fail_with_bad_host_via_init_interface(_: Client, mut config: Config) -> Result<(), Error> { config.fail_fast = true; config.server = ServerConfig::new_centralized("incorrecthost", 1234); let client = Builder::from_config(config).build()?; @@ -733,24 +695,18 @@ pub async fn should_fail_with_bad_host_via_init_interface( Ok(()) } -pub async fn should_connect_correctly_via_wait_interface( - _: RedisClient, - config: RedisConfig, -) -> Result<(), RedisError> { +pub async fn should_connect_correctly_via_wait_interface(_: Client, config: Config) -> Result<(), Error> { let client = Builder::from_config(config).build()?; let task = client.connect(); client.wait_for_connect().await?; - client.ping().await?; - client.quit().await?; + let _: () = client.ping(None).await?; + let _: () = client.quit().await?; task.await??; Ok(()) } -pub async fn should_fail_with_bad_host_via_wait_interface( - _: RedisClient, - mut config: RedisConfig, -) -> Result<(), RedisError> { +pub async fn should_fail_with_bad_host_via_wait_interface(_: Client, mut config: Config) -> Result<(), Error> { config.fail_fast = true; config.server = ServerConfig::new_centralized("incorrecthost", 1234); let client = Builder::from_config(config).build()?; @@ -761,12 +717,8 @@ pub async fn should_fail_with_bad_host_via_wait_interface( Ok(()) } -// TODO this will require a breaking change to support. The `Replicas` struct assumes that it's operating on a -// `RedisClient` and is not generic for other client or decorator types. 
`Replicas` must become `Replicas` first. -#[allow(dead_code)] #[cfg(all(feature = "replicas", feature = "i-keys"))] -pub async fn should_combine_options_and_replicas(client: RedisClient, config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_combine_options_and_replicas(client: Client, config: Config) -> Result<(), Error> { let mut connection = client.connection_config().clone(); connection.replica = ReplicaConfig { lazy_connections: true, @@ -776,12 +728,12 @@ pub async fn should_combine_options_and_replicas(client: RedisClient, config: Re }; connection.max_redirections = 0; let policy = client.client_reconnect_policy(); - let client = RedisClient::new(config, None, Some(connection), policy); + let client = Client::new(config, None, Some(connection), policy); client.init().await?; // change the cluster hash policy such that we get a routing error if both replicas and options are correctly // applied - let key = RedisKey::from_static_str("foo"); + let key = Key::from_static_str("foo"); let (servers, foo_owner) = client .cached_cluster_state() .map(|s| { @@ -791,6 +743,9 @@ pub async fn should_combine_options_and_replicas(client: RedisClient, config: Re ) }) .unwrap(); + // in this case the caller has specified the wrong cluster owner node, and none of the replica connections have been + // created since lazy_connections is true. the client should check whether the provided node matches the primary or + // any of the replicas, and if not it should return an error early that the command is not routable. 
let wrong_owner = servers.iter().find(|s| foo_owner != **s).unwrap().clone(); let options = Options { @@ -808,12 +763,61 @@ pub async fn should_combine_options_and_replicas(client: RedisClient, config: Re .err() .unwrap(); - // not ideal - assert_eq!(error.details(), "Too many redirections."); + assert_eq!(*error.kind(), ErrorKind::Routing); Ok(()) } -pub async fn should_fail_on_centralized_connect(_: RedisClient, mut config: RedisConfig) -> Result<(), RedisError> { +#[cfg(all(feature = "replicas", feature = "i-keys"))] +pub async fn should_combine_options_and_replicas_non_lazy(client: Client, config: Config) -> Result<(), Error> { + let mut connection = client.connection_config().clone(); + connection.replica = ReplicaConfig { + lazy_connections: false, + primary_fallback: false, + ignore_reconnection_errors: false, + ..ReplicaConfig::default() + }; + connection.max_redirections = 0; + let policy = client.client_reconnect_policy(); + let client = Client::new(config, None, Some(connection), policy); + client.init().await?; + + // change the cluster hash policy such that we get a routing error if both replicas and options are correctly + // applied + let key = Key::from_static_str("foo"); + let (servers, foo_owner) = client + .cached_cluster_state() + .map(|s| { + ( + s.unique_primary_nodes(), + s.get_server(key.cluster_hash()).unwrap().clone(), + ) + }) + .unwrap(); + // in this case since all the connections are created the client will route to a replica of the wrong primary node, + // receiving a MOVED redirection in response. since the max redirections is zero the client should return a "too + // many redirections" error. 
+ let wrong_owner = servers.iter().find(|s| foo_owner != **s).unwrap().clone(); + + let options = Options { + max_redirections: Some(0), + max_attempts: Some(1), + cluster_node: Some(wrong_owner), + ..Default::default() + }; + + let error = client + .with_options(&options) + .replicas() + .get::, _>(key) + .await + .err() + .unwrap(); + + assert_eq!(*error.kind(), ErrorKind::Routing); + Ok(()) +} + +pub async fn should_fail_on_centralized_connect(_: Client, mut config: Config) -> Result<(), Error> { if let ServerConfig::Centralized { server } = config.server { config.server = ServerConfig::Clustered { hosts: vec![server], @@ -824,15 +828,15 @@ pub async fn should_fail_on_centralized_connect(_: RedisClient, mut config: Redi return Ok(()); } - let client = RedisClient::new(config, None, None, None); + let client = Client::new(config, None, None, None); client.connect(); if let Err(err) = client.wait_for_connect().await { - assert_eq!(*err.kind(), RedisErrorKind::Config, "err = {:?}", err); + assert_eq!(*err.kind(), ErrorKind::Config, "err = {:?}", err); return Ok(()); } - Err(RedisError::new(RedisErrorKind::Unknown, "Expected a config error.")) + Err(Error::new(ErrorKind::Unknown, "Expected a config error.")) } #[derive(Debug, Default)] @@ -842,13 +846,13 @@ pub struct FakeCreds {} #[async_trait] #[cfg(feature = "credential-provider")] impl CredentialProvider for FakeCreds { - async fn fetch(&self, _: Option<&Server>) -> Result<(Option, Option), RedisError> { + async fn fetch(&self, _: Option<&Server>) -> Result<(Option, Option), Error> { use super::utils::{read_redis_password, read_redis_username}; Ok((Some(read_redis_username()), Some(read_redis_password()))) } } #[cfg(feature = "credential-provider")] -pub async fn should_use_credential_provider(_client: RedisClient, mut config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_use_credential_provider(_client: Client, mut config: Config) -> Result<(), Error> { let (perf, connection) = 
(_client.perf_config(), _client.connection_config().clone()); config.username = None; config.password = None; @@ -859,21 +863,44 @@ pub async fn should_use_credential_provider(_client: RedisClient, mut config: Re .build()?; client.init().await?; - client.ping().await?; - client.quit().await?; + let _: () = client.ping(None).await?; + let _: () = client.quit().await?; Ok(()) } #[cfg(feature = "i-pubsub")] -pub async fn should_exit_event_task_with_error(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let task = client.on_message(|_| Err(RedisError::new_canceled())); - client.subscribe("foo").await?; +pub async fn should_exit_event_task_with_error(client: Client, _: Config) -> Result<(), Error> { + let task = client.on_message(|_| async { Err(Error::new_canceled()) }); + let _: () = client.subscribe("foo").await?; let publisher = client.clone_new(); publisher.init().await?; - publisher.publish("foo", "bar").await?; + let _: () = publisher.publish("foo", "bar").await?; let result = task.await.unwrap(); - assert_eq!(result, Err(RedisError::new_canceled())); + assert_eq!(result, Err(Error::new_canceled())); + Ok(()) +} + +#[cfg(feature = "replicas")] +pub async fn should_create_non_lazy_replica_connections(client: Client, config: Config) -> Result<(), Error> { + if !config.server.is_clustered() { + return Ok(()); + } + + let mut connection_config = client.connection_config().clone(); + connection_config.replica = ReplicaConfig { + lazy_connections: false, + primary_fallback: true, + ..Default::default() + }; + + let client = Builder::from_config(config) + .set_performance_config(client.perf_config()) + .set_connection_config(connection_config) + .build()?; + client.init().await?; + + assert_eq!(client.active_connections().len(), 6); Ok(()) } diff --git a/tests/integration/pool/mod.rs b/tests/integration/pool/mod.rs index 92ca9a3a..56c51760 100644 --- a/tests/integration/pool/mod.rs +++ b/tests/integration/pool/mod.rs @@ -1,44 +1,38 @@ use fred::{ - 
clients::{RedisClient, RedisPool}, - error::RedisError, + clients::{Client, Pool}, + error::Error, interfaces::*, - types::RedisConfig, + types::config::Config, }; #[cfg(feature = "i-keys")] -use fred::types::{Builder, ReconnectPolicy}; +use fred::types::{config::ReconnectPolicy, Builder}; #[cfg(feature = "i-keys")] use futures::future::try_join_all; -async fn create_and_ping_pool(config: &RedisConfig, count: usize) -> Result<(), RedisError> { - let pool = RedisPool::new(config.clone(), None, None, None, count)?; +async fn create_and_ping_pool(config: &Config, count: usize) -> Result<(), Error> { + let pool = Pool::new(config.clone(), None, None, None, count)?; pool.init().await?; for client in pool.clients().iter() { - client.ping().await?; + let _: () = client.ping(None).await?; } - pool.ping().await?; - pool.quit().await?; + let _: () = pool.ping(None).await?; + let _: () = pool.quit().await?; Ok(()) } -pub async fn should_connect_and_ping_static_pool_single_conn( - _: RedisClient, - config: RedisConfig, -) -> Result<(), RedisError> { +pub async fn should_connect_and_ping_static_pool_single_conn(_: Client, config: Config) -> Result<(), Error> { create_and_ping_pool(&config, 1).await } -pub async fn should_connect_and_ping_static_pool_two_conn( - _: RedisClient, - config: RedisConfig, -) -> Result<(), RedisError> { +pub async fn should_connect_and_ping_static_pool_two_conn(_: Client, config: Config) -> Result<(), Error> { create_and_ping_pool(&config, 2).await } #[cfg(feature = "i-keys")] -pub async fn should_incr_exclusive_pool(client: RedisClient, config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_incr_exclusive_pool(client: Client, config: Config) -> Result<(), Error> { let perf = client.perf_config(); let policy = client .client_reconnect_policy() @@ -53,10 +47,10 @@ pub async fn should_incr_exclusive_pool(client: RedisClient, config: RedisConfig for _ in 0 .. 
10 { let client = pool.acquire().await; - client.incr("foo").await?; + let _: () = client.incr("foo").await?; } assert_eq!(client.get::("foo").await?, 10); - client.del("foo").await?; + let _: () = client.del("foo").await?; let mut fts = Vec::with_capacity(10); for _ in 0 .. 10 { @@ -73,7 +67,7 @@ pub async fn should_incr_exclusive_pool(client: RedisClient, config: RedisConfig } #[cfg(all(feature = "i-keys", feature = "transactions"))] -pub async fn should_watch_and_trx_exclusive_pool(client: RedisClient, config: RedisConfig) -> Result<(), RedisError> { +pub async fn should_watch_and_trx_exclusive_pool(client: Client, config: Config) -> Result<(), Error> { let perf = client.perf_config(); let policy = client .client_reconnect_policy() @@ -86,7 +80,7 @@ pub async fn should_watch_and_trx_exclusive_pool(client: RedisClient, config: Re .build_exclusive_pool(5)?; pool.init().await?; - client.set("foo{1}", 1, None, None, false).await?; + let _: () = client.set("foo{1}", 1, None, None, false).await?; let results: Option<(i64, i64, i64)> = { let client = pool.acquire().await; @@ -94,9 +88,9 @@ pub async fn should_watch_and_trx_exclusive_pool(client: RedisClient, config: Re client.watch("foo").await?; if let Some(1) = client.get::, _>("foo{1}").await? { let trx = client.multi(); - trx.incr("foo{1}").await?; - trx.incr("bar{1}").await?; - trx.incr("baz{1}").await?; + let _: () = trx.incr("foo{1}").await?; + let _: () = trx.incr("bar{1}").await?; + let _: () = trx.incr("baz{1}").await?; Some(trx.exec(true).await?) 
} else { None diff --git a/tests/integration/pubsub/mod.rs b/tests/integration/pubsub/mod.rs index 09ecf316..69dea59c 100644 --- a/tests/integration/pubsub/mod.rs +++ b/tests/integration/pubsub/mod.rs @@ -17,7 +17,7 @@ async fn wait_a_sec() { tokio::time::sleep(Duration::from_millis(20)).await; } -pub async fn should_publish_and_recv_messages(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_publish_and_recv_messages(client: Client, _: Config) -> Result<(), Error> { let subscriber_client = client.clone_new(); subscriber_client.connect(); subscriber_client.wait_for_connect().await?; @@ -35,13 +35,13 @@ pub async fn should_publish_and_recv_messages(client: RedisClient, _: RedisConfi } } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); sleep(Duration::from_secs(1)).await; for idx in 0 .. NUM_MESSAGES { // https://redis.io/commands/publish#return-value - client.publish(CHANNEL1, format!("{}-{}", FAKE_MESSAGE, idx)).await?; + let _: () = client.publish(CHANNEL1, format!("{}-{}", FAKE_MESSAGE, idx)).await?; // pubsub messages may arrive out of order due to cross-cluster broadcasting sleep(Duration::from_millis(50)).await; @@ -51,7 +51,7 @@ pub async fn should_publish_and_recv_messages(client: RedisClient, _: RedisConfi Ok(()) } -pub async fn should_ssubscribe_and_recv_messages(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_ssubscribe_and_recv_messages(client: Client, _: Config) -> Result<(), Error> { let subscriber_client = client.clone_new(); subscriber_client.connect(); subscriber_client.wait_for_connect().await?; @@ -69,13 +69,13 @@ pub async fn should_ssubscribe_and_recv_messages(client: RedisClient, _: RedisCo } } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); sleep(Duration::from_secs(1)).await; for idx in 0 .. 
NUM_MESSAGES { // https://redis.io/commands/publish#return-value - client.spublish(CHANNEL1, format!("{}-{}", FAKE_MESSAGE, idx)).await?; + let _: () = client.spublish(CHANNEL1, format!("{}-{}", FAKE_MESSAGE, idx)).await?; // pubsub messages may arrive out of order due to cross-cluster broadcasting sleep(Duration::from_millis(50)).await; @@ -85,7 +85,7 @@ pub async fn should_ssubscribe_and_recv_messages(client: RedisClient, _: RedisCo Ok(()) } -pub async fn should_psubscribe_and_recv_messages(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_psubscribe_and_recv_messages(client: Client, _: Config) -> Result<(), Error> { let channels = vec![CHANNEL1, CHANNEL2, CHANNEL3]; let subscriber_channels = channels.clone(); @@ -106,7 +106,7 @@ pub async fn should_psubscribe_and_recv_messages(client: RedisClient, _: RedisCo } } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); sleep(Duration::from_secs(1)).await; @@ -114,7 +114,7 @@ pub async fn should_psubscribe_and_recv_messages(client: RedisClient, _: RedisCo let channel = channels[idx as usize % channels.len()]; // https://redis.io/commands/publish#return-value - client.publish(channel, format!("{}-{}", FAKE_MESSAGE, idx)).await?; + let _: () = client.publish(channel, format!("{}-{}", FAKE_MESSAGE, idx)).await?; // pubsub messages may arrive out of order due to cross-cluster broadcasting sleep(Duration::from_millis(50)).await; @@ -124,7 +124,7 @@ pub async fn should_psubscribe_and_recv_messages(client: RedisClient, _: RedisCo Ok(()) } -pub async fn should_unsubscribe_from_all(publisher: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_unsubscribe_from_all(publisher: Client, _: Config) -> Result<(), Error> { let subscriber = publisher.clone_new(); let connection = subscriber.connect(); subscriber.wait_for_connect().await?; @@ -137,23 +137,23 @@ pub async fn should_unsubscribe_from_all(publisher: RedisClient, _: RedisConfig) panic!("Recv unexpected pubsub 
message: {:?}", message); } - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); subscriber.unsubscribe(()).await?; sleep(Duration::from_secs(1)).await; // make sure the response buffer is flushed correctly by this point - assert_eq!(subscriber.ping::().await?, "PONG"); - assert_eq!(subscriber.ping::().await?, "PONG"); - assert_eq!(subscriber.ping::().await?, "PONG"); + assert_eq!(subscriber.ping::(None).await?, "PONG"); + assert_eq!(subscriber.ping::(None).await?, "PONG"); + assert_eq!(subscriber.ping::(None).await?, "PONG"); subscriber.quit().await?; let _ = connection.await?; Ok(()) } -pub async fn should_get_pubsub_channels(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_get_pubsub_channels(client: Client, _: Config) -> Result<(), Error> { let subscriber = client.clone_new(); subscriber.connect(); subscriber.wait_for_connect().await?; @@ -182,7 +182,7 @@ pub async fn should_get_pubsub_channels(client: RedisClient, _: RedisConfig) -> Ok(()) } -pub async fn should_get_pubsub_numpat(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_get_pubsub_numpat(client: Client, _: Config) -> Result<(), Error> { let subscriber = client.clone_new(); subscriber.connect(); subscriber.wait_for_connect().await?; @@ -196,7 +196,7 @@ pub async fn should_get_pubsub_numpat(client: RedisClient, _: RedisConfig) -> Re Ok(()) } -pub async fn should_get_pubsub_nunmsub(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_get_pubsub_nunmsub(client: Client, _: Config) -> Result<(), Error> { let subscriber = client.clone_new(); subscriber.connect(); subscriber.wait_for_connect().await?; @@ -220,7 +220,7 @@ pub async fn should_get_pubsub_nunmsub(client: RedisClient, _: RedisConfig) -> R Ok(()) } -pub async fn should_get_pubsub_shard_channels(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_get_pubsub_shard_channels(client: Client, _: Config) -> Result<(), 
Error> { let subscriber = client.clone_new(); subscriber.connect(); subscriber.wait_for_connect().await?; @@ -239,7 +239,7 @@ pub async fn should_get_pubsub_shard_channels(client: RedisClient, _: RedisConfi Ok(()) } -pub async fn should_get_pubsub_shard_numsub(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_get_pubsub_shard_numsub(client: Client, _: Config) -> Result<(), Error> { let subscriber = client.clone_new(); subscriber.connect(); subscriber.wait_for_connect().await?; diff --git a/tests/integration/redis_json/mod.rs b/tests/integration/redis_json/mod.rs index 952106c0..167025af 100644 --- a/tests/integration/redis_json/mod.rs +++ b/tests/integration/redis_json/mod.rs @@ -1,31 +1,31 @@ use fred::{ - clients::RedisClient, - error::RedisError, + clients::Client, + error::Error, interfaces::RedisJsonInterface, json_quote, - types::{RedisConfig, RedisValue}, + types::{config::Config, Value}, util::NONE, }; -use serde_json::{json, Value}; +use serde_json::{json, Value as JsonValue}; -pub async fn should_get_and_set_basic_obj(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let value: Value = client.json_get("foo", NONE, NONE, NONE, "$").await?; - assert_eq!(value, Value::Null); +pub async fn should_get_and_set_basic_obj(client: Client, _: Config) -> Result<(), Error> { + let value: JsonValue = client.json_get("foo", NONE, NONE, NONE, "$").await?; + assert_eq!(value, JsonValue::Null); let value = json!({ "a": "b", "c": 1 }); let _: () = client.json_set("foo", "$", value.clone(), None).await?; - let result: Value = client.json_get("foo", NONE, NONE, NONE, "$").await?; + let result: JsonValue = client.json_get("foo", NONE, NONE, NONE, "$").await?; assert_eq!(value, result[0]); Ok(()) } -pub async fn should_get_and_set_stringified_obj(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let value: Value = client.json_get("foo", NONE, NONE, NONE, "$").await?; - assert_eq!(value, Value::Null); +pub 
async fn should_get_and_set_stringified_obj(client: Client, _: Config) -> Result<(), Error> { + let value: JsonValue = client.json_get("foo", NONE, NONE, NONE, "$").await?; + assert_eq!(value, JsonValue::Null); let value = json!({ "a": "b", @@ -34,13 +34,13 @@ pub async fn should_get_and_set_stringified_obj(client: RedisClient, _: RedisCon let _: () = client .json_set("foo", "$", serde_json::to_string(&value)?, None) .await?; - let result: Value = client.json_get("foo", NONE, NONE, NONE, "$").await?; + let result: JsonValue = client.json_get("foo", NONE, NONE, NONE, "$").await?; assert_eq!(value, result[0]); Ok(()) } -pub async fn should_array_append(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_array_append(client: Client, _: Config) -> Result<(), Error> { let _: () = client.json_set("foo", "$", json!(["a", "b"]), None).await?; // need to double quote string values @@ -53,13 +53,13 @@ pub async fn should_array_append(client: RedisClient, _: RedisConfig) -> Result< let len: i64 = client.json_arrlen("foo", NONE).await?; assert_eq!(len, 5); - let result: Value = client.json_get("foo", NONE, NONE, NONE, "$").await?; + let result: JsonValue = client.json_get("foo", NONE, NONE, NONE, "$").await?; assert_eq!(result[0], json!(["a", "b", "c", "d", {"e": "f"}])); Ok(()) } -pub async fn should_modify_arrays(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_modify_arrays(client: Client, _: Config) -> Result<(), Error> { let _: () = client.json_set("foo", "$", json!(["a", "d"]), None).await?; let len: i64 = client .json_arrinsert("foo", "$", 1, vec![json_quote!("b"), json_quote!("c")]) @@ -73,74 +73,74 @@ pub async fn should_modify_arrays(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_pop_and_trim_arrays(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_pop_and_trim_arrays(client: Client, _: Config) -> Result<(), Error> { let _: () = 
client.json_set("foo", "$", json!(["a", "b"]), None).await?; - let val: Value = client.json_arrpop("foo", NONE, None).await?; + let val: JsonValue = client.json_arrpop("foo", NONE, None).await?; assert_eq!(val, json!("b")); let _: () = client.json_set("foo", "$", json!(["a", "b", "c", "d"]), None).await?; let len: usize = client.json_arrtrim("foo", "$", 0, -2).await?; assert_eq!(len, 3); - let vals: Value = client.json_get("foo", NONE, NONE, NONE, "$").await?; + let vals: JsonValue = client.json_get("foo", NONE, NONE, NONE, "$").await?; assert_eq!(vals[0], json!(["a", "b", "c"])); Ok(()) } -pub async fn should_get_set_del_obj(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_get_set_del_obj(client: Client, _: Config) -> Result<(), Error> { let value = json!({ "a": "b", "c": 1, "d": true }); let _: () = client.json_set("foo", "$", value.clone(), None).await?; - let result: Value = client.json_get("foo", NONE, NONE, NONE, "$").await?; + let result: JsonValue = client.json_get("foo", NONE, NONE, NONE, "$").await?; assert_eq!(value, result[0]); let count: i64 = client.json_del("foo", "$..c").await?; assert_eq!(count, 1); - let result: Value = client.json_get("foo", NONE, NONE, NONE, "$").await?; + let result: JsonValue = client.json_get("foo", NONE, NONE, NONE, "$").await?; assert_eq!(result[0], json!({ "a": "b", "d": true })); Ok(()) } -pub async fn should_merge_objects(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_merge_objects(client: Client, _: Config) -> Result<(), Error> { let foo = json!({ "a": "b", "c": { "d": "e" } }); let bar = json!({ "a": "b1", "c": { "d1": "e1" }, "y": "z" }); let expected = json!({ "a": "b1", "c": {"d": "e", "d1": "e1"}, "y": "z" }); let _: () = client.json_set("foo", "$", foo.clone(), None).await?; let _: () = client.json_merge("foo", "$", bar.clone()).await?; - let merged: Value = client.json_get("foo", NONE, NONE, NONE, "$").await?; + let merged: JsonValue = 
client.json_get("foo", NONE, NONE, NONE, "$").await?; assert_eq!(merged[0], expected); Ok(()) } -pub async fn should_mset_and_mget(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_mset_and_mget(client: Client, _: Config) -> Result<(), Error> { let values = [json!({ "a": "b" }), json!({ "c": "d" })]; let args = vec![("foo{1}", "$", values[0].clone()), ("bar{1}", "$", values[1].clone())]; let _: () = client.json_mset(args).await?; - let result: Value = client.json_mget(vec!["foo{1}", "bar{1}"], "$").await?; + let result: JsonValue = client.json_mget(vec!["foo{1}", "bar{1}"], "$").await?; // response is nested: Array [Array [Object {"a": String("b")}], Array [Object {"c": String("d")}]] assert_eq!(result, json!([[values[0]], [values[1]]])); Ok(()) } -pub async fn should_incr_numbers(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_incr_numbers(client: Client, _: Config) -> Result<(), Error> { let _: () = client.json_set("foo", "$", json!({ "a": 1 }), None).await?; - let vals: Value = client.json_numincrby("foo", "$.a", 2).await?; + let vals: JsonValue = client.json_numincrby("foo", "$.a", 2).await?; assert_eq!(vals[0], 3); Ok(()) } -pub async fn should_inspect_objects(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_inspect_objects(client: Client, _: Config) -> Result<(), Error> { let value = json!({ "a": "b", "e": { @@ -163,7 +163,7 @@ pub async fn should_inspect_objects(client: RedisClient, _: RedisConfig) -> Resu Ok(()) } -pub async fn should_modify_strings(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_modify_strings(client: Client, _: Config) -> Result<(), Error> { let _: () = client.json_set("foo", "$", json!({ "a": "abc123" }), None).await?; let len: usize = client.json_strlen("foo", Some("$.a")).await?; assert_eq!(len, 6); @@ -172,13 +172,13 @@ pub async fn should_modify_strings(client: RedisClient, _: 
RedisConfig) -> Resul assert_eq!(len, 9); let len: usize = client.json_strlen("foo", Some("$.a")).await?; assert_eq!(len, 9); - let value: Value = client.json_get("foo", NONE, NONE, NONE, "$").await?; + let value: JsonValue = client.json_get("foo", NONE, NONE, NONE, "$").await?; assert_eq!(value[0], json!({ "a": "abc123456" })); Ok(()) } -pub async fn should_toggle_boolean(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_toggle_boolean(client: Client, _: Config) -> Result<(), Error> { let _: () = client.json_set("foo", "$", json!({ "a": 1, "b": true }), None).await?; let new_val: bool = client.json_toggle("foo", "$.b").await?; assert!(!new_val); @@ -186,7 +186,7 @@ pub async fn should_toggle_boolean(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -pub async fn should_get_value_type(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_get_value_type(client: Client, _: Config) -> Result<(), Error> { let _: () = client.json_set("foo", "$", json!({ "a": 1, "b": true }), None).await?; let val: String = client.json_type("foo", NONE).await?; assert_eq!(val, "object"); diff --git a/tests/integration/redisearch/mod.rs b/tests/integration/redisearch/mod.rs index 75331fdd..8f5d5de3 100644 --- a/tests/integration/redisearch/mod.rs +++ b/tests/integration/redisearch/mod.rs @@ -1,16 +1,18 @@ use fred::{ - error::RedisError, + error::Error, prelude::*, types::{ - AggregateOperation, - FtAggregateOptions, - FtCreateOptions, - FtSearchOptions, - IndexKind, - Load, - RedisMap, - SearchSchema, - SearchSchemaKind, + redisearch::{ + AggregateOperation, + FtAggregateOptions, + FtCreateOptions, + FtSearchOptions, + IndexKind, + Load, + SearchSchema, + SearchSchemaKind, + }, + Map, }, util::NONE, }; @@ -19,10 +21,10 @@ use rand::{thread_rng, Rng}; use redis_protocol::resp3::types::RespVersion; use std::{collections::HashMap, time::Duration}; -pub async fn should_list_indexes(client: RedisClient, _: RedisConfig) 
-> Result<(), RedisError> { +pub async fn should_list_indexes(client: Client, _: Config) -> Result<(), Error> { assert!(client.ft_list::>().await?.is_empty()); - client + let _: () = client .ft_create("foo", FtCreateOptions::default(), vec![SearchSchema { field_name: "bar".into(), alias: Some("baz".into()), @@ -38,10 +40,10 @@ pub async fn should_list_indexes(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_index_and_info_basic_hash(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_index_and_info_basic_hash(client: Client, _: Config) -> Result<(), Error> { assert!(client.ft_list::>().await?.is_empty()); - client + let _: () = client .ft_create( "foo_idx", FtCreateOptions { @@ -64,19 +66,19 @@ pub async fn should_index_and_info_basic_hash(client: RedisClient, _: RedisConfi ) .await?; - client.hset("foo", ("bar", "abc123")).await?; + let _: () = client.hset("foo", ("bar", "abc123")).await?; tokio::time::sleep(Duration::from_millis(100)).await; - let mut info: HashMap = client.ft_info("foo_idx").await?; - assert_eq!(info.remove("num_docs").unwrap_or(RedisValue::Null).convert::()?, 1); + let mut info: HashMap = client.ft_info("foo_idx").await?; + assert_eq!(info.remove("num_docs").unwrap_or(Value::Null).convert::()?, 1); Ok(()) } -pub async fn should_index_and_search_hash(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_index_and_search_hash(client: Client, _: Config) -> Result<(), Error> { assert!(client.ft_list::>().await?.is_empty()); - client + let _: () = client .ft_create( "foo_idx", FtCreateOptions { @@ -100,20 +102,19 @@ pub async fn should_index_and_search_hash(client: RedisClient, _: RedisConfig) - ) .await?; - client.hset("record:1", ("bar", "abc 123")).await?; - client.hset("record:2", ("bar", "abc 345")).await?; - client.hset("record:3", ("bar", "def 678")).await?; + let _: () = client.hset("record:1", ("bar", "abc 123")).await?; + let _: () = 
client.hset("record:2", ("bar", "abc 345")).await?; + let _: () = client.hset("record:3", ("bar", "def 678")).await?; tokio::time::sleep(Duration::from_millis(100)).await; if client.protocol_version() == RespVersion::RESP3 { // RESP3 uses maps and includes extra metadata fields - let mut results: HashMap = - client.ft_search("foo_idx", "*", FtSearchOptions::default()).await?; + let mut results: HashMap = client.ft_search("foo_idx", "*", FtSearchOptions::default()).await?; assert_eq!( results .get("total_results") .cloned() - .unwrap_or(RedisValue::Null) + .unwrap_or(Value::Null) .convert::()?, 3 ); @@ -122,25 +123,25 @@ pub async fn should_index_and_search_hash(client: RedisClient, _: RedisConfig) - // 123"},"id":"record:1","values":[]},{"extra_attributes":{"bar":"abc // 345"},"id":"record:2","values":[]},{"extra_attributes":{"bar":"def // 678"},"id":"record:3","values":[]}],"total_results":3,"warning":[]} - let results: Vec> = results.remove("results").unwrap().convert()?; + let results: Vec> = results.remove("results").unwrap().convert()?; let expected = vec![ hashmap! { "id" => "record:1".into(), - "values" => RedisValue::Array(vec![]), + "values" => Value::Array(vec![]), "extra_attributes" => hashmap! { "bar" => "abc 123" }.try_into()? }, hashmap! { "id" => "record:2".into(), - "values" => RedisValue::Array(vec![]), + "values" => Value::Array(vec![]), "extra_attributes" => hashmap! { "bar" => "abc 345" }.try_into()? }, hashmap! { "id" => "record:3".into(), - "values" => RedisValue::Array(vec![]), + "values" => Value::Array(vec![]), "extra_attributes" => hashmap! 
{ "bar" => "def 678" } @@ -151,27 +152,27 @@ pub async fn should_index_and_search_hash(client: RedisClient, _: RedisConfig) - .map(|m| { m.into_iter() .map(|(k, v)| (k.to_string(), v)) - .collect::>() + .collect::>() }) .collect::>(); assert_eq!(results, expected); } else { // RESP2 uses an array format w/o extra metadata - let results: (usize, RedisKey, RedisKey, RedisKey) = client + let results: (usize, Key, Key, Key) = client .ft_search("foo_idx", "*", FtSearchOptions { nocontent: true, ..Default::default() }) .await?; assert_eq!(results, (3, "record:1".into(), "record:2".into(), "record:3".into())); - let results: (usize, RedisKey, RedisKey) = client + let results: (usize, Key, Key) = client .ft_search("foo_idx", "@bar:(abc)", FtSearchOptions { nocontent: true, ..Default::default() }) .await?; assert_eq!(results, (2, "record:1".into(), "record:2".into())); - let results: (usize, RedisKey, (String, String)) = client + let results: (usize, Key, (String, String)) = client .ft_search("foo_idx", "@bar:(def)", FtSearchOptions::default()) .await?; assert_eq!(results, (1, "record:3".into(), ("bar".into(), "def 678".into()))); @@ -180,11 +181,11 @@ pub async fn should_index_and_search_hash(client: RedisClient, _: RedisConfig) - Ok(()) } -pub async fn should_index_and_aggregate_timestamps(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_index_and_aggregate_timestamps(client: Client, _: Config) -> Result<(), Error> { assert!(client.ft_list::>().await?.is_empty()); // https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/aggregations/ - client + let _: () = client .ft_create( "timestamp_idx", FtCreateOptions { @@ -206,7 +207,7 @@ pub async fn should_index_and_aggregate_timestamps(client: RedisClient, _: Redis for idx in 0 .. 100 { let rand: u64 = thread_rng().gen_range(0 .. 
10000); - client + let _: () = client .hset(format!("record:{}", idx), [ ("timestamp", idx), ("user_id", idx + 1000), @@ -221,7 +222,7 @@ pub async fn should_index_and_aggregate_timestamps(client: RedisClient, _: Redis // FT.AGGREGATE myIndex "*" // APPLY "@timestamp - (@timestamp % 3600)" AS hour - let mut result: HashMap = client + let mut result: HashMap = client .ft_aggregate("timestamp_idx", "*", FtAggregateOptions { load: Some(Load::All), pipeline: vec![AggregateOperation::Apply { @@ -232,9 +233,9 @@ pub async fn should_index_and_aggregate_timestamps(client: RedisClient, _: Redis }) .await?; - let results: Vec = result.remove("results").unwrap().convert()?; + let results: Vec = result.remove("results").unwrap().convert()?; for (idx, val) in results.into_iter().enumerate() { - let mut val: HashMap = val.convert()?; + let mut val: HashMap = val.convert()?; let mut val: HashMap = val.remove("extra_attributes").unwrap().convert()?; assert_eq!(val.remove("timestamp").unwrap(), idx); assert_eq!(val.remove("hour").unwrap(), 0); @@ -243,7 +244,7 @@ pub async fn should_index_and_aggregate_timestamps(client: RedisClient, _: Redis } else { // FT.AGGREGATE myIndex "*" // APPLY "@timestamp - (@timestamp % 3600)" AS hour - let result: Vec = client + let result: Vec = client .ft_aggregate("timestamp_idx", "*", FtAggregateOptions { load: Some(Load::All), pipeline: vec![AggregateOperation::Apply { diff --git a/tests/integration/scanning/mod.rs b/tests/integration/scanning/mod.rs index 039c05ca..d5b35e74 100644 --- a/tests/integration/scanning/mod.rs +++ b/tests/integration/scanning/mod.rs @@ -1,18 +1,19 @@ #![allow(dead_code)] use fred::{ prelude::*, - types::{ScanResult, Scanner}, + types::scan::{ScanResult, Scanner}, }; use futures::{Stream, TryStreamExt}; // tokio_stream has a more flexible version of `collect` +use bytes_utils::Str; use tokio_stream::StreamExt; const SCAN_KEYS: i64 = 100; #[cfg(feature = "i-keys")] -pub async fn should_scan_keyspace(client: RedisClient, 
_: RedisConfig) -> Result<(), RedisError> { +pub async fn should_scan_keyspace(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. SCAN_KEYS { - client + let _: () = client .set(format!("foo-{}-{}", idx, "{1}"), idx, None, None, false) .await?; } @@ -32,7 +33,7 @@ pub async fn should_scan_keyspace(client: RedisClient, _: RedisConfig) -> Result panic!("Empty results in scan."); } - result.next()?; + result.next(); Ok(count) }) .await?; @@ -42,10 +43,10 @@ pub async fn should_scan_keyspace(client: RedisClient, _: RedisConfig) -> Result } #[cfg(feature = "i-hashes")] -pub async fn should_hscan_hash(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_hscan_hash(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. SCAN_KEYS { let value = (format!("bar-{}", idx), idx); - client.hset("foo", value).await?; + let _: () = client.hset("foo", value).await?; } let count = client @@ -63,7 +64,7 @@ pub async fn should_hscan_hash(client: RedisClient, _: RedisConfig) -> Result<() panic!("Empty results in hscan."); } - result.next()?; + result.next(); Ok(count) }) .await?; @@ -73,9 +74,9 @@ pub async fn should_hscan_hash(client: RedisClient, _: RedisConfig) -> Result<() } #[cfg(feature = "i-sets")] -pub async fn should_sscan_set(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_sscan_set(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. 
SCAN_KEYS { - client.sadd("foo", idx).await?; + let _: () = client.sadd("foo", idx).await?; } let count = client @@ -91,7 +92,7 @@ pub async fn should_sscan_set(client: RedisClient, _: RedisConfig) -> Result<(), panic!("Empty sscan result"); } - result.next()?; + result.next(); Ok(count) }) .await?; @@ -101,10 +102,10 @@ pub async fn should_sscan_set(client: RedisClient, _: RedisConfig) -> Result<(), } #[cfg(feature = "i-sorted-sets")] -pub async fn should_zscan_sorted_set(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zscan_sorted_set(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. SCAN_KEYS { let (score, value) = (idx as f64, format!("foo-{}", idx)); - client.zadd("foo", None, None, false, false, (score, value)).await?; + let _: () = client.zadd("foo", None, None, false, false, (score, value)).await?; } let count = client @@ -124,7 +125,7 @@ pub async fn should_zscan_sorted_set(client: RedisClient, _: RedisConfig) -> Res panic!("Empty zscan result"); } - result.next()?; + result.next(); Ok(count) }) .await?; @@ -134,9 +135,9 @@ pub async fn should_zscan_sorted_set(client: RedisClient, _: RedisConfig) -> Res } #[cfg(feature = "i-keys")] -pub async fn should_scan_cluster(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_scan_cluster(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. 
2000 { - client.set(idx, idx, None, None, false).await?; + let _: () = client.set(idx, idx, None, None, false).await?; } let mut count = 0; @@ -144,7 +145,7 @@ pub async fn should_scan_cluster(client: RedisClient, _: RedisConfig) -> Result< while let Some(Ok(mut page)) = scan_stream.next().await { let results = page.take_results(); count += results.unwrap().len(); - let _ = page.next(); + page.next(); } assert_eq!(count, 2000); @@ -152,19 +153,19 @@ pub async fn should_scan_cluster(client: RedisClient, _: RedisConfig) -> Result< } #[cfg(feature = "i-keys")] -pub async fn should_scan_buffered(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_scan_buffered(client: Client, _: Config) -> Result<(), Error> { let mut expected = Vec::with_capacity(100); for idx in 0 .. 100 { // write everything to the same cluster node - let key: RedisKey = format!("foo-{{1}}-{}", idx).into(); + let key: Key = format!("foo-{{1}}-{}", idx).into(); expected.push(key.clone()); let _: () = client.set(key, idx, None, None, false).await?; } expected.sort(); - let mut keys: Vec = client + let mut keys: Vec = client .scan_buffered("foo-{1}*", Some(20), None) - .collect::, RedisError>>() + .collect::, Error>>() .await?; keys.sort(); @@ -173,18 +174,18 @@ pub async fn should_scan_buffered(client: RedisClient, _: RedisConfig) -> Result } #[cfg(feature = "i-keys")] -pub async fn should_scan_cluster_buffered(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_scan_cluster_buffered(client: Client, _: Config) -> Result<(), Error> { let mut expected = Vec::with_capacity(100); for idx in 0 .. 
100 { - let key: RedisKey = format!("foo-{}", idx).into(); + let key: Key = format!("foo-{}", idx).into(); expected.push(key.clone()); let _: () = client.set(key, idx, None, None, false).await?; } expected.sort(); - let mut keys: Vec = client + let mut keys: Vec = client .scan_cluster_buffered("foo*", Some(20), None) - .collect::, RedisError>>() + .collect::, Error>>() .await?; keys.sort(); @@ -193,7 +194,7 @@ pub async fn should_scan_cluster_buffered(client: RedisClient, _: RedisConfig) - } #[cfg(feature = "i-keys")] -fn scan_all(client: &RedisClient, page_size: Option) -> impl Stream> { +fn scan_all(client: &Client, page_size: Option) -> impl Stream> { use futures::StreamExt; if client.is_clustered() { @@ -204,9 +205,9 @@ fn scan_all(client: &RedisClient, page_size: Option) -> impl Stream Result<(), RedisError> { +pub async fn should_continue_scanning_on_page_drop(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. 100 { - let key: RedisKey = format!("foo-{}", idx).into(); + let key: Key = format!("foo-{}", idx).into(); let _: () = client.set(key, idx, None, None, false).await?; } @@ -220,3 +221,62 @@ pub async fn should_continue_scanning_on_page_drop(client: RedisClient, _: Redis Ok(()) } + +#[cfg(feature = "i-keys")] +pub async fn should_scan_by_page_centralized(client: Client, _: Config) -> Result<(), Error> { + for idx in 0 .. 100 { + let key: Key = format!("foo-{}", idx).into(); + let _: () = client.set(key, idx, None, None, false).await?; + } + let mut cursor: Str = "0".into(); + let mut count = 0; + + loop { + let (new_cursor, keys): (Str, Vec) = client.scan_page(cursor, "*", None, None).await?; + count += keys.len(); + + if new_cursor == "0" { + break; + } else { + cursor = new_cursor; + } + } + + assert_eq!(count, 100); + Ok(()) +} + +#[cfg(all(feature = "i-keys", feature = "i-cluster"))] +pub async fn should_scan_by_page_clustered(client: Client, _: Config) -> Result<(), Error> { + for idx in 0 .. 
100 { + let key: Key = format!("foo-{{1}}-{idx}").into(); + let _: () = client.set(key, idx, None, None, false).await?; + } + let mut cursor: Str = "0".into(); + let mut count = 0; + + let server = client + .cached_cluster_state() + .and_then(|state| { + let slot = redis_protocol::redis_keyslot(b"foo-{1}-0"); + state.get_server(slot).cloned() + }) + .unwrap(); + + loop { + let (new_cursor, keys): (Str, Vec) = client + .with_cluster_node(&server) + .scan_page(cursor, "*", None, None) + .await?; + count += keys.len(); + + if new_cursor == "0" { + break; + } else { + cursor = new_cursor; + } + } + + assert_eq!(count, 100); + Ok(()) +} diff --git a/tests/integration/server/mod.rs b/tests/integration/server/mod.rs index 8b312c2a..95bf9503 100644 --- a/tests/integration/server/mod.rs +++ b/tests/integration/server/mod.rs @@ -2,12 +2,12 @@ use fred::{cmd, prelude::*}; use std::time::Duration; use tokio::time::sleep; -pub async fn should_flushall(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.custom(cmd!("SET"), vec!["foo{1}", "bar"]).await?; +pub async fn should_flushall(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.custom(cmd!("SET"), vec!["foo{1}", "bar"]).await?; if client.is_clustered() { client.flushall_cluster().await?; } else { - client.flushall(false).await?; + let _: () = client.flushall(false).await?; }; let result: Option = client.custom(cmd!("GET"), vec!["foo{1}"]).await?; @@ -16,35 +16,31 @@ pub async fn should_flushall(client: RedisClient, _: RedisConfig) -> Result<(), Ok(()) } -pub async fn should_read_server_info(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_read_server_info(client: Client, _: Config) -> Result<(), Error> { let info: Option = client.info(None).await?; assert!(info.is_some()); Ok(()) } -pub async fn should_ping_server(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.ping().await?; - - Ok(()) -} - -pub async fn 
should_run_custom_command(_client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - // TODO find a good third party module to test - +pub async fn should_ping_pong_command(client: Client, _: Config) -> Result<(), Error> { + let res: String = client.ping(None).await?; + assert_eq!(res, "PONG"); + let res: String = client.ping(Some("hello world!".into())).await?; + assert_eq!(res, "hello world!"); Ok(()) } -pub async fn should_read_last_save(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_read_last_save(client: Client, _: Config) -> Result<(), Error> { let lastsave: Option = client.lastsave().await?; assert!(lastsave.is_some()); Ok(()) } -pub async fn should_read_db_size(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_read_db_size(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. 50 { - client + let _: () = client .custom(cmd!("SET"), vec![format!("foo-{}", idx), idx.to_string()]) .await?; } @@ -57,7 +53,7 @@ pub async fn should_read_db_size(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_start_bgsave(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_start_bgsave(client: Client, _: Config) -> Result<(), Error> { let save_result: String = client.bgsave().await?; assert_eq!(save_result, "Background saving started"); @@ -66,11 +62,16 @@ pub async fn should_start_bgsave(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_do_bgrewriteaof(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.bgrewriteaof().await?; +pub async fn should_do_bgrewriteaof(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.bgrewriteaof().await?; // not much we can assert here aside from the command not failing // need to ensure this finishes before it runs again or it'll return an error sleep(Duration::from_millis(1000)).await; Ok(()) } + +pub async fn 
should_select_index_command(client: Client, _: Config) -> Result<(), Error> { + assert_eq!(client.select(0).await, Ok(())); + Ok(()) +} diff --git a/tests/integration/sets/mod.rs b/tests/integration/sets/mod.rs index 55c4dd6f..54011698 100644 --- a/tests/integration/sets/mod.rs +++ b/tests/integration/sets/mod.rs @@ -1,7 +1,7 @@ use fred::prelude::*; use std::collections::HashSet; -fn vec_to_set(data: Vec) -> HashSet { +fn vec_to_set(data: Vec) -> HashSet { let mut out = HashSet::with_capacity(data.len()); for value in data.into_iter() { out.insert(value); @@ -15,11 +15,11 @@ fn vec_to_set(data: Vec) -> HashSet { // &lhs == rhs // } -fn sets_eq(lhs: &HashSet, rhs: &HashSet) -> bool { +fn sets_eq(lhs: &HashSet, rhs: &HashSet) -> bool { lhs == rhs } -pub async fn should_sadd_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_sadd_elements(client: Client, _: Config) -> Result<(), Error> { let result: i64 = client.sadd("foo", "a").await?; assert_eq!(result, 1); let result: i64 = client.sadd("foo", vec!["b", "c"]).await?; @@ -30,7 +30,7 @@ pub async fn should_sadd_elements(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_scard_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_scard_elements(client: Client, _: Config) -> Result<(), Error> { let result: i64 = client.scard("foo").await?; assert_eq!(result, 0); @@ -42,30 +42,30 @@ pub async fn should_scard_elements(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -pub async fn should_sdiff_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; - client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; - let result: HashSet = client.sdiff(vec!["foo{1}", "bar{1}"]).await?; +pub async fn should_sdiff_elements(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; + let _: 
() = client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; + let result: HashSet = client.sdiff(vec!["foo{1}", "bar{1}"]).await?; assert!(sets_eq(&result, &vec_to_set(vec!["1".into(), "2".into()]))); Ok(()) } -pub async fn should_sdiffstore_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; - client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; +pub async fn should_sdiffstore_elements(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; + let _: () = client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; let result: i64 = client.sdiffstore("baz{1}", vec!["foo{1}", "bar{1}"]).await?; assert_eq!(result, 2); - let result: HashSet = client.smembers("baz{1}").await?; + let result: HashSet = client.smembers("baz{1}").await?; assert!(sets_eq(&result, &vec_to_set(vec!["1".into(), "2".into()]))); Ok(()) } -pub async fn should_sinter_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; - client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; - let result: HashSet = client.sinter(vec!["foo{1}", "bar{1}"]).await?; +pub async fn should_sinter_elements(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; + let _: () = client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; + let result: HashSet = client.sinter(vec!["foo{1}", "bar{1}"]).await?; assert!(sets_eq( &result, @@ -75,12 +75,12 @@ pub async fn should_sinter_elements(client: RedisClient, _: RedisConfig) -> Resu Ok(()) } -pub async fn should_sinterstore_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; - client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; +pub async fn should_sinterstore_elements(client: Client, _: Config) -> Result<(), Error> { + let _: () = 
client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; + let _: () = client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; let result: i64 = client.sinterstore("baz{1}", vec!["foo{1}", "bar{1}"]).await?; assert_eq!(result, 4); - let result: HashSet = client.smembers("baz{1}").await?; + let result: HashSet = client.smembers("baz{1}").await?; assert!(sets_eq( &result, @@ -90,8 +90,8 @@ pub async fn should_sinterstore_elements(client: RedisClient, _: RedisConfig) -> Ok(()) } -pub async fn should_check_sismember(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.sadd("foo", vec![1, 2, 3, 4, 5, 6]).await?; +pub async fn should_check_sismember(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.sadd("foo", vec![1, 2, 3, 4, 5, 6]).await?; let result: bool = client.sismember("foo", 1).await?; assert!(result); @@ -101,8 +101,8 @@ pub async fn should_check_sismember(client: RedisClient, _: RedisConfig) -> Resu Ok(()) } -pub async fn should_check_smismember(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.sadd("foo", vec![1, 2, 3, 4, 5, 6]).await?; +pub async fn should_check_smismember(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.sadd("foo", vec![1, 2, 3, 4, 5, 6]).await?; let result: Vec = client.smismember("foo", vec![1, 2, 7]).await?; assert!(result[0]); @@ -115,9 +115,9 @@ pub async fn should_check_smismember(client: RedisClient, _: RedisConfig) -> Res Ok(()) } -pub async fn should_read_smembers(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.sadd("foo", vec![1, 2, 3, 4, 5, 6]).await?; - let result: HashSet = client.smembers("foo").await?; +pub async fn should_read_smembers(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.sadd("foo", vec![1, 2, 3, 4, 5, 6]).await?; + let result: HashSet = client.smembers("foo").await?; assert!(sets_eq( &result, &vec_to_set(vec![ @@ -133,9 +133,9 @@ pub async fn should_read_smembers(client: 
RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_smove_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; - client.sadd("bar{1}", 5).await?; +pub async fn should_smove_elements(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; + let _: () = client.sadd("bar{1}", 5).await?; let result: i64 = client.smove("foo{1}", "bar{1}", 7).await?; assert_eq!(result, 0); @@ -144,8 +144,8 @@ pub async fn should_smove_elements(client: RedisClient, _: RedisConfig) -> Resul let result: i64 = client.smove("foo{1}", "bar{1}", 1).await?; assert_eq!(result, 1); - let foo: HashSet = client.smembers("foo{1}").await?; - let bar: HashSet = client.smembers("bar{1}").await?; + let foo: HashSet = client.smembers("foo{1}").await?; + let bar: HashSet = client.smembers("bar{1}").await?; assert!(sets_eq( &foo, &vec_to_set(vec!["2".into(), "3".into(), "4".into(), "6".into()]) @@ -155,14 +155,14 @@ pub async fn should_smove_elements(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -pub async fn should_spop_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_spop_elements(client: Client, _: Config) -> Result<(), Error> { let expected = vec_to_set(vec!["1".into(), "2".into(), "3".into()]); - client.sadd("foo", vec![1, 2, 3]).await?; + let _: () = client.sadd("foo", vec![1, 2, 3]).await?; let result = client.spop("foo", None).await?; assert!(expected.contains(&result)); - let result: Vec = client.spop("foo", Some(3)).await?; + let result: Vec = client.spop("foo", Some(3)).await?; for value in result.into_iter() { assert!(expected.contains(&value)); } @@ -170,13 +170,13 @@ pub async fn should_spop_elements(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_get_random_member(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn 
should_get_random_member(client: Client, _: Config) -> Result<(), Error> { let expected = vec_to_set(vec!["1".into(), "2".into(), "3".into()]); - client.sadd("foo", vec![1, 2, 3]).await?; + let _: () = client.sadd("foo", vec![1, 2, 3]).await?; let result = client.srandmember("foo", None).await?; assert!(expected.contains(&result)); - let result: Vec = client.srandmember("foo", Some(3)).await?; + let result: Vec = client.srandmember("foo", Some(3)).await?; for value in result.into_iter() { assert!(expected.contains(&value)); } @@ -184,26 +184,26 @@ pub async fn should_get_random_member(client: RedisClient, _: RedisConfig) -> Re Ok(()) } -pub async fn should_remove_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_remove_elements(client: Client, _: Config) -> Result<(), Error> { let result: i64 = client.srem("foo", 1).await?; assert_eq!(result, 0); - client.sadd("foo", vec![1, 2, 3, 4, 5, 6]).await?; + let _: () = client.sadd("foo", vec![1, 2, 3, 4, 5, 6]).await?; let result: i64 = client.srem("foo", 1).await?; assert_eq!(result, 1); let result: i64 = client.srem("foo", vec![2, 3, 4, 7]).await?; assert_eq!(result, 3); - let result: HashSet = client.smembers("foo").await?; + let result: HashSet = client.smembers("foo").await?; assert!(sets_eq(&result, &vec_to_set(vec!["5".into(), "6".into()]))); Ok(()) } -pub async fn should_sunion_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; - client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; - let result: HashSet = client.sunion(vec!["foo{1}", "bar{1}"]).await?; +pub async fn should_sunion_elements(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; + let _: () = client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; + let result: HashSet = client.sunion(vec!["foo{1}", "bar{1}"]).await?; assert!(sets_eq( &result, @@ -222,12 +222,12 @@ 
pub async fn should_sunion_elements(client: RedisClient, _: RedisConfig) -> Resu Ok(()) } -pub async fn should_sunionstore_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; - client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; +pub async fn should_sunionstore_elements(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; + let _: () = client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; let result: i64 = client.sunionstore("baz{1}", vec!["foo{1}", "bar{1}"]).await?; assert_eq!(result, 8); - let result: HashSet = client.smembers("baz{1}").await?; + let result: HashSet = client.smembers("baz{1}").await?; assert!(sets_eq( &result, diff --git a/tests/integration/slowlog/mod.rs b/tests/integration/slowlog/mod.rs index 4195fa3e..b0aca3fa 100644 --- a/tests/integration/slowlog/mod.rs +++ b/tests/integration/slowlog/mod.rs @@ -1,14 +1,14 @@ use fred::{prelude::*, types::SlowlogEntry}; -pub async fn should_read_slowlog_length(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.slowlog_length().await?; +pub async fn should_read_slowlog_length(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.slowlog_length().await?; // cant assert much here since the tests run in any order, and the call to reset the slowlog might run just before // this Ok(()) } -pub async fn should_read_slowlog_entries(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_read_slowlog_entries(client: Client, _: Config) -> Result<(), Error> { let entries: Vec = client.slowlog_get(Some(10)).await?; for entry in entries.into_iter() { @@ -19,7 +19,7 @@ pub async fn should_read_slowlog_entries(client: RedisClient, _: RedisConfig) -> Ok(()) } -pub async fn should_reset_slowlog(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_reset_slowlog(client: Client, _: 
Config) -> Result<(), Error> { client.slowlog_reset().await?; let len: i64 = client.slowlog_length().await?; // the slowlog length call might show up here diff --git a/tests/integration/sorted_sets/mod.rs b/tests/integration/sorted_sets/mod.rs index 2601abf1..20a097e7 100644 --- a/tests/integration/sorted_sets/mod.rs +++ b/tests/integration/sorted_sets/mod.rs @@ -1,7 +1,7 @@ use float_cmp::approx_eq; use fred::{ prelude::*, - types::{Ordering, ZRange, ZRangeBound, ZRangeKind, ZSort}, + types::sorted_sets::{Ordering, ZRange, ZRangeBound, ZRangeKind, ZSort}, }; use std::{cmp::Ordering as CmpOrdering, convert::TryInto, time::Duration}; use tokio::time::sleep; @@ -18,24 +18,24 @@ fn f64_cmp(lhs: f64, rhs: f64) -> CmpOrdering { } } -async fn create_lex_data(client: &RedisClient, key: &str) -> Result, RedisError> { +async fn create_lex_data(client: &Client, key: &str) -> Result, Error> { let values: Vec<(f64, String)> = "abcdefghijklmnopqrstuvwxyz" .chars() .map(|c| (0.0, c.to_string())) .collect(); - client.zadd(key, None, None, false, false, values.clone()).await?; + let _: () = client.zadd(key, None, None, false, false, values.clone()).await?; Ok(values.into_iter().map(|(f, v)| (f, v.into())).collect()) } -async fn create_count_data(client: &RedisClient, key: &str) -> Result, RedisError> { - let values: Vec<(f64, RedisValue)> = (0 .. COUNT).map(|idx| (idx as f64, idx.to_string().into())).collect(); +async fn create_count_data(client: &Client, key: &str) -> Result, Error> { + let values: Vec<(f64, Value)> = (0 .. 
COUNT).map(|idx| (idx as f64, idx.to_string().into())).collect(); - client.zadd(key, None, None, false, false, values.clone()).await?; + let _: () = client.zadd(key, None, None, false, false, values.clone()).await?; Ok(values) } -pub async fn should_bzpopmin(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_bzpopmin(client: Client, _: Config) -> Result<(), Error> { let publisher_client = client.clone_new(); publisher_client.connect(); publisher_client.wait_for_connect().await?; @@ -46,7 +46,7 @@ pub async fn should_bzpopmin(client: RedisClient, _: RedisConfig) -> Result<(), assert_eq!(result, ("foo".into(), idx, idx as f64)); } - Ok::<(), RedisError>(()) + Ok::<(), Error>(()) }); for idx in 0 .. COUNT { @@ -60,7 +60,7 @@ pub async fn should_bzpopmin(client: RedisClient, _: RedisConfig) -> Result<(), Ok(()) } -pub async fn should_bzpopmax(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_bzpopmax(client: Client, _: Config) -> Result<(), Error> { let publisher_client = client.clone_new(); publisher_client.connect(); publisher_client.wait_for_connect().await?; @@ -71,7 +71,7 @@ pub async fn should_bzpopmax(client: RedisClient, _: RedisConfig) -> Result<(), assert_eq!(result, ("foo".into(), idx, idx as f64)); } - Ok::<(), RedisError>(()) + Ok::<(), Error>(()) }); for idx in 0 .. 
COUNT { @@ -87,7 +87,7 @@ pub async fn should_bzpopmax(client: RedisClient, _: RedisConfig) -> Result<(), Ok(()) } -pub async fn should_zadd_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zadd_values(client: Client, _: Config) -> Result<(), Error> { let result: i64 = client .zadd("foo", None, None, false, false, vec![(0.0, 0), (1.0, 1)]) .await?; @@ -175,7 +175,7 @@ pub async fn should_zadd_values(client: RedisClient, _: RedisConfig) -> Result<( Ok(()) } -pub async fn should_zcard_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zcard_values(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. COUNT { let values = vec![(idx as f64, idx), ((idx + COUNT) as f64, idx + COUNT)]; let result: i64 = client.zadd("foo", None, None, false, false, values).await?; @@ -190,7 +190,7 @@ pub async fn should_zcard_values(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_zcount_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zcount_values(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. COUNT { let values = vec![(idx as f64, idx), ((idx + COUNT) as f64, idx + COUNT)]; let result: i64 = client.zadd("foo", None, None, false, false, values).await?; @@ -207,8 +207,8 @@ pub async fn should_zcount_values(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_zdiff_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let mut expected: Vec<(f64, RedisValue)> = Vec::with_capacity(COUNT as usize); +pub async fn should_zdiff_values(client: Client, _: Config) -> Result<(), Error> { + let mut expected: Vec<(f64, Value)> = Vec::with_capacity(COUNT as usize); for idx in 0 .. 
COUNT { expected.push((idx as f64, idx.to_string().into())); let result: i64 = client @@ -217,11 +217,11 @@ pub async fn should_zdiff_values(client: RedisClient, _: RedisConfig) -> Result< assert_eq!(result, 1); } - let result: Vec = client.zdiff(vec!["foo{1}", "bar{1}"], false).await?; - let _expected: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); + let result: Vec = client.zdiff(vec!["foo{1}", "bar{1}"], false).await?; + let _expected: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); assert_eq!(result, _expected); - client + let _: () = client .zadd( "bar{1}", None, @@ -231,8 +231,8 @@ pub async fn should_zdiff_values(client: RedisClient, _: RedisConfig) -> Result< expected[0 .. expected.len() - 1].to_vec(), ) .await?; - let result: RedisValue = client.zdiff(vec!["foo{1}", "bar{1}"], true).await?; - let expected: Vec<(RedisValue, f64)> = expected.into_iter().map(|(s, v)| (v, s)).collect(); + let result: Value = client.zdiff(vec!["foo{1}", "bar{1}"], true).await?; + let expected: Vec<(Value, f64)> = expected.into_iter().map(|(s, v)| (v, s)).collect(); assert_eq!( result.into_zset_result().unwrap(), expected[expected.len() - 1 ..].to_vec() @@ -241,8 +241,8 @@ pub async fn should_zdiff_values(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_zdiffstore_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let mut expected: Vec<(f64, RedisValue)> = Vec::with_capacity(COUNT as usize); +pub async fn should_zdiffstore_values(client: Client, _: Config) -> Result<(), Error> { + let mut expected: Vec<(f64, Value)> = Vec::with_capacity(COUNT as usize); for idx in 0 .. 
COUNT { expected.push((idx as f64, idx.to_string().into())); let result: i64 = client @@ -254,7 +254,7 @@ pub async fn should_zdiffstore_values(client: RedisClient, _: RedisConfig) -> Re let result: i64 = client.zdiffstore("baz{1}", vec!["foo{1}", "bar{1}"]).await?; assert_eq!(result, COUNT); - client + let _: () = client .zadd( "bar{1}", None, @@ -270,7 +270,7 @@ pub async fn should_zdiffstore_values(client: RedisClient, _: RedisConfig) -> Re Ok(()) } -pub async fn should_zincrby_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zincrby_values(client: Client, _: Config) -> Result<(), Error> { let result: f64 = client.zincrby("foo", 1.0, "a").await?; assert_eq!(result, 1.0); let result: f64 = client.zincrby("foo", 2.5, "a").await?; @@ -281,8 +281,8 @@ pub async fn should_zincrby_values(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -pub async fn should_zinter_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let mut expected: Vec<(f64, RedisValue)> = Vec::with_capacity(COUNT as usize); +pub async fn should_zinter_values(client: Client, _: Config) -> Result<(), Error> { + let mut expected: Vec<(f64, Value)> = Vec::with_capacity(COUNT as usize); for idx in 0 .. COUNT { expected.push((idx as f64, idx.to_string().into())); let result: i64 = client @@ -291,10 +291,10 @@ pub async fn should_zinter_values(client: RedisClient, _: RedisConfig) -> Result assert_eq!(result, 1); } - let result: Vec = client.zinter(vec!["foo{1}", "bar{1}"], None, None, false).await?; + let result: Vec = client.zinter(vec!["foo{1}", "bar{1}"], None, None, false).await?; assert!(result.is_empty()); - client + let _: () = client .zadd( "bar{1}", None, @@ -304,9 +304,9 @@ pub async fn should_zinter_values(client: RedisClient, _: RedisConfig) -> Result expected[0 .. 
expected.len() - 1].to_vec(), ) .await?; - let result: RedisValue = client.zinter(vec!["foo{1}", "bar{1}"], None, None, true).await?; + let result: Value = client.zinter(vec!["foo{1}", "bar{1}"], None, None, true).await?; // scores are added together with a weight of 1 in this example - let mut expected: Vec<(RedisValue, f64)> = expected.into_iter().map(|(s, v)| (v, s * 2.0)).collect(); + let mut expected: Vec<(Value, f64)> = expected.into_iter().map(|(s, v)| (v, s * 2.0)).collect(); // zinter returns results in descending order based on score expected.reverse(); @@ -317,8 +317,8 @@ pub async fn should_zinter_values(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_zinterstore_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let mut expected: Vec<(f64, RedisValue)> = Vec::with_capacity(COUNT as usize); +pub async fn should_zinterstore_values(client: Client, _: Config) -> Result<(), Error> { + let mut expected: Vec<(f64, Value)> = Vec::with_capacity(COUNT as usize); for idx in 0 .. 
COUNT { expected.push((idx as f64, idx.to_string().into())); let result: i64 = client @@ -332,7 +332,7 @@ pub async fn should_zinterstore_values(client: RedisClient, _: RedisConfig) -> R .await?; assert_eq!(result, 0); - client + let _: () = client .zadd( "bar{1}", None, @@ -350,7 +350,7 @@ pub async fn should_zinterstore_values(client: RedisClient, _: RedisConfig) -> R Ok(()) } -pub async fn should_zlexcount(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zlexcount(client: Client, _: Config) -> Result<(), Error> { let _ = create_lex_data(&client, "foo").await?; let result: i64 = client.zlexcount("foo", "-", "+").await?; @@ -363,11 +363,11 @@ pub async fn should_zlexcount(client: RedisClient, _: RedisConfig) -> Result<(), Ok(()) } -pub async fn should_zpopmax(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zpopmax(client: Client, _: Config) -> Result<(), Error> { let _ = create_count_data(&client, "foo").await?; for idx in 0 .. COUNT { - let result: RedisValue = client.zpopmax("foo", None).await?; + let result: Value = client.zpopmax("foo", None).await?; let (member, score) = result.into_zset_result().unwrap().pop().unwrap(); assert_eq!(score, (COUNT - idx - 1) as f64); assert_eq!(member, (COUNT - idx - 1).to_string().into()); @@ -378,11 +378,11 @@ pub async fn should_zpopmax(client: RedisClient, _: RedisConfig) -> Result<(), R Ok(()) } -pub async fn should_zpopmin(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zpopmin(client: Client, _: Config) -> Result<(), Error> { let _ = create_count_data(&client, "foo").await?; for idx in 0 .. 
COUNT { - let result: RedisValue = client.zpopmin("foo", None).await?; + let result: Value = client.zpopmin("foo", None).await?; let (member, score) = result.into_zset_result().unwrap().pop().unwrap(); assert_eq!(score, idx as f64); assert_eq!(member, idx.to_string().into()); @@ -393,17 +393,17 @@ pub async fn should_zpopmin(client: RedisClient, _: RedisConfig) -> Result<(), R Ok(()) } -pub async fn should_zrandmember(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zrandmember(client: Client, _: Config) -> Result<(), Error> { let _ = create_count_data(&client, "foo").await?; for _ in 0 .. COUNT * 2 { - let result: RedisValue = client.zrandmember("foo", Some((1, true))).await?; + let result: Value = client.zrandmember("foo", Some((1, true))).await?; let (member, score) = result.into_zset_result().unwrap().pop().unwrap(); assert!(score >= 0.0 && score < COUNT as f64); assert_eq!(member.into_string().unwrap(), score.to_string()); } - let result: RedisValue = client.zrandmember("foo", Some((COUNT, true))).await?; + let result: Value = client.zrandmember("foo", Some((COUNT, true))).await?; let result = result.into_zset_result().unwrap(); for (member, score) in result.into_iter() { assert!(score >= 0.0 && score < COUNT as f64); @@ -413,7 +413,7 @@ pub async fn should_zrandmember(client: RedisClient, _: RedisConfig) -> Result<( Ok(()) } -pub async fn should_zrangestore_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zrangestore_values(client: Client, _: Config) -> Result<(), Error> { let _ = create_count_data(&client, "foo{1}").await?; let result: i64 = client @@ -426,18 +426,18 @@ pub async fn should_zrangestore_values(client: RedisClient, _: RedisConfig) -> R Ok(()) } -pub async fn should_zrangebylex(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zrangebylex(client: Client, _: Config) -> Result<(), Error> { let expected = create_lex_data(&client, 
"foo").await?; - let expected_values: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); + let expected_values: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); - let old_result: RedisValue = client.zrangebylex("foo", "-", "+", None).await?; + let old_result: Value = client.zrangebylex("foo", "-", "+", None).await?; let new_result = client .zrange("foo", "-", "+", Some(ZSort::ByLex), false, None, false) .await?; assert_eq!(old_result, new_result); assert_eq!(old_result.into_array(), expected_values); - let old_result: RedisValue = client.zrangebylex("foo", "a", "[c", None).await?; + let old_result: Value = client.zrangebylex("foo", "a", "[c", None).await?; let new_result = client .zrange("foo", "a", "[c", Some(ZSort::ByLex), false, None, false) .await?; @@ -447,19 +447,19 @@ pub async fn should_zrangebylex(client: RedisClient, _: RedisConfig) -> Result<( Ok(()) } -pub async fn should_zrevrangebylex(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zrevrangebylex(client: Client, _: Config) -> Result<(), Error> { let expected = create_lex_data(&client, "foo").await?; - let mut expected_values: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); + let mut expected_values: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); expected_values.reverse(); - let old_result: RedisValue = client.zrevrangebylex("foo", "+", "-", None).await?; + let old_result: Value = client.zrevrangebylex("foo", "+", "-", None).await?; let new_result = client .zrange("foo", "+", "-", Some(ZSort::ByLex), true, None, false) .await?; assert_eq!(old_result, new_result); assert_eq!(old_result.into_array(), expected_values); - let old_result: RedisValue = client.zrevrangebylex("foo", "c", "[a", None).await?; + let old_result: Value = client.zrevrangebylex("foo", "c", "[a", None).await?; let new_result = client .zrange("foo", "[c", "a", Some(ZSort::ByLex), true, None, false) .await?; @@ -469,18 +469,18 @@ pub async fn 
should_zrevrangebylex(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -pub async fn should_zrangebyscore(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zrangebyscore(client: Client, _: Config) -> Result<(), Error> { let expected = create_count_data(&client, "foo").await?; - let expected_values: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); + let expected_values: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); - let old_result: RedisValue = client.zrangebyscore("foo", "-inf", "+inf", false, None).await?; + let old_result: Value = client.zrangebyscore("foo", "-inf", "+inf", false, None).await?; let new_result = client .zrange("foo", "-inf", "+inf", Some(ZSort::ByScore), false, None, false) .await?; assert_eq!(old_result, new_result); assert_eq!(old_result.into_array(), expected_values); - let old_result: RedisValue = client + let old_result: Value = client .zrangebyscore("foo", (COUNT / 2) as f64, COUNT as f64, false, None) .await?; let new_result = client @@ -505,7 +505,7 @@ pub async fn should_zrangebyscore(client: RedisClient, _: RedisConfig) -> Result kind: ZRangeKind::Inclusive, range: (COUNT as f64).try_into()?, }; - let old_result: RedisValue = client.zrangebyscore("foo", &lower, &upper, false, None).await?; + let old_result: Value = client.zrangebyscore("foo", &lower, &upper, false, None).await?; let new_result = client .zrange("foo", &lower, &upper, Some(ZSort::ByScore), false, None, false) .await?; @@ -515,19 +515,19 @@ pub async fn should_zrangebyscore(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_zrevrangebyscore(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zrevrangebyscore(client: Client, _: Config) -> Result<(), Error> { let expected = create_count_data(&client, "foo").await?; - let mut expected_values: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); + let mut expected_values: Vec = expected.iter().map(|(_, 
v)| v.clone()).collect(); expected_values.reverse(); - let old_result: RedisValue = client.zrevrangebyscore("foo", "+inf", "-inf", false, None).await?; + let old_result: Value = client.zrevrangebyscore("foo", "+inf", "-inf", false, None).await?; let new_result = client .zrange("foo", "+inf", "-inf", Some(ZSort::ByScore), true, None, false) .await?; assert_eq!(old_result, new_result); assert_eq!(old_result.into_array(), expected_values); - let old_result: RedisValue = client + let old_result: Value = client .zrevrangebyscore("foo", COUNT as f64, (COUNT / 2) as f64, false, None) .await?; let new_result = client @@ -552,7 +552,7 @@ pub async fn should_zrevrangebyscore(client: RedisClient, _: RedisConfig) -> Res kind: ZRangeKind::Inclusive, range: (COUNT as f64).try_into()?, }; - let old_result: RedisValue = client.zrevrangebyscore("foo", &upper, &lower, false, None).await?; + let old_result: Value = client.zrevrangebyscore("foo", &upper, &lower, false, None).await?; let new_result = client .zrange("foo", &upper, &lower, Some(ZSort::ByScore), true, None, false) .await?; @@ -562,21 +562,36 @@ pub async fn should_zrevrangebyscore(client: RedisClient, _: RedisConfig) -> Res Ok(()) } -pub async fn should_zrank_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zrank_values(client: Client, _: Config) -> Result<(), Error> { let _ = create_count_data(&client, "foo").await?; for idx in 0 .. 
COUNT { - let result: i64 = client.zrank("foo", idx).await?; + let result: i64 = client.zrank("foo", idx, false).await?; assert_eq!(result, idx); } - let result: Option = client.zrank("foo", COUNT + 1).await?; + let result: Option = client.zrank("foo", COUNT + 1, false).await?; assert!(result.is_none()); Ok(()) } -pub async fn should_zrem_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zrank_values_withscore(client: Client, _: Config) -> Result<(), Error> { + let _ = create_count_data(&client, "foo").await?; + + for idx in 0 .. COUNT { + let (result, score): (i64, f64) = client.zrank("foo", idx, true).await?; + assert_eq!(result, idx); + assert_eq!(score, idx as f64); + } + + let result: Option<(i64, f64)> = client.zrank("foo", COUNT + 1, true).await?; + assert!(result.is_none()); + + Ok(()) +} + +pub async fn should_zrem_values(client: Client, _: Config) -> Result<(), Error> { let _ = create_count_data(&client, "foo").await?; let result: i64 = client.zrem("foo", COUNT + 1).await?; @@ -597,7 +612,7 @@ pub async fn should_zrem_values(client: RedisClient, _: RedisConfig) -> Result<( Ok(()) } -pub async fn should_zremrangebylex(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zremrangebylex(client: Client, _: Config) -> Result<(), Error> { let expected = create_lex_data(&client, "foo").await?; let result: usize = client.zremrangebylex("foo", "-", "+").await?; assert_eq!(result, expected.len()); @@ -617,7 +632,7 @@ pub async fn should_zremrangebylex(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -pub async fn should_zremrangebyrank(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zremrangebyrank(client: Client, _: Config) -> Result<(), Error> { let expected = create_count_data(&client, "foo").await?; let result: usize = client.zremrangebyrank("foo", 0, COUNT).await?; assert_eq!(result, expected.len()); @@ -636,7 +651,7 @@ pub async fn 
should_zremrangebyrank(client: RedisClient, _: RedisConfig) -> Resu Ok(()) } -pub async fn should_zremrangebyscore(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zremrangebyscore(client: Client, _: Config) -> Result<(), Error> { let expected = create_count_data(&client, "foo").await?; let result: usize = client.zremrangebyscore("foo", 0 as f64, COUNT as f64).await?; assert_eq!(result, expected.len()); @@ -654,21 +669,21 @@ pub async fn should_zremrangebyscore(client: RedisClient, _: RedisConfig) -> Res Ok(()) } -pub async fn should_zrevrank_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zrevrank_values(client: Client, _: Config) -> Result<(), Error> { let _ = create_count_data(&client, "foo").await?; - let result: Option = client.zrevrank("foo", COUNT + 1).await?; + let result: Option = client.zrevrank("foo", COUNT + 1, false).await?; assert!(result.is_none()); for idx in 0 .. COUNT { - let result: i64 = client.zrevrank("foo", idx).await?; + let result: i64 = client.zrevrank("foo", idx, false).await?; assert_eq!(result, COUNT - (idx + 1)); } Ok(()) } -pub async fn should_zscore_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zscore_values(client: Client, _: Config) -> Result<(), Error> { let _ = create_count_data(&client, "foo").await?; for idx in 0 .. COUNT { @@ -682,8 +697,8 @@ pub async fn should_zscore_values(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_zunion_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let mut expected: Vec<(f64, RedisValue)> = Vec::with_capacity(COUNT as usize); +pub async fn should_zunion_values(client: Client, _: Config) -> Result<(), Error> { + let mut expected: Vec<(f64, Value)> = Vec::with_capacity(COUNT as usize); for idx in 0 .. 
COUNT { expected.push((idx as f64, idx.to_string().into())); let result: i64 = client @@ -692,11 +707,11 @@ pub async fn should_zunion_values(client: RedisClient, _: RedisConfig) -> Result assert_eq!(result, 1); } - let result: RedisValue = client.zunion(vec!["foo{1}", "bar{1}"], None, None, false).await?; - let _expected: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); + let result: Value = client.zunion(vec!["foo{1}", "bar{1}"], None, None, false).await?; + let _expected: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); assert_eq!(result.into_array(), _expected); - client + let _: () = client .zadd( "bar{1}", None, @@ -706,9 +721,9 @@ pub async fn should_zunion_values(client: RedisClient, _: RedisConfig) -> Result expected[0 .. expected.len() - 1].to_vec(), ) .await?; - let result: RedisValue = client.zunion(vec!["foo{1}", "bar{1}"], None, None, true).await?; + let result: Value = client.zunion(vec!["foo{1}", "bar{1}"], None, None, true).await?; // scores are added together with a weight of 1 in this example - let mut _expected: Vec<(RedisValue, f64)> = expected[0 .. expected.len() - 1] + let mut _expected: Vec<(Value, f64)> = expected[0 .. expected.len() - 1] .iter() .map(|(s, v)| (v.clone(), s * 2.0)) .collect(); @@ -723,8 +738,8 @@ pub async fn should_zunion_values(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_zunionstore_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let mut expected: Vec<(f64, RedisValue)> = Vec::with_capacity(COUNT as usize); +pub async fn should_zunionstore_values(client: Client, _: Config) -> Result<(), Error> { + let mut expected: Vec<(f64, Value)> = Vec::with_capacity(COUNT as usize); for idx in 0 .. 
COUNT { expected.push((idx as f64, idx.to_string().into())); let result: i64 = client @@ -738,7 +753,7 @@ pub async fn should_zunionstore_values(client: RedisClient, _: RedisConfig) -> R .await?; assert_eq!(result, COUNT); - client + let _: () = client .zadd( "bar{1}", None, @@ -756,9 +771,9 @@ pub async fn should_zunionstore_values(client: RedisClient, _: RedisConfig) -> R Ok(()) } -pub async fn should_zmscore_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_zmscore_values(client: Client, _: Config) -> Result<(), Error> { for idx in 0 .. COUNT { - client.zadd("foo", None, None, false, false, (idx as f64, idx)).await?; + let _: () = client.zadd("foo", None, None, false, false, (idx as f64, idx)).await?; } let result: Vec = client.zmscore("foo", vec![0, 1]).await?; @@ -769,8 +784,8 @@ pub async fn should_zmscore_values(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -pub async fn should_zrangebyscore_neg_infinity(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client +pub async fn should_zrangebyscore_neg_infinity(client: Client, _: Config) -> Result<(), Error> { + let _: () = client .zadd("foo", None, None, false, false, vec![ (-10.0, "a"), (-5.0, "b"), diff --git a/tests/integration/streams/mod.rs b/tests/integration/streams/mod.rs index 4c111884..e520f0d0 100644 --- a/tests/integration/streams/mod.rs +++ b/tests/integration/streams/mod.rs @@ -1,22 +1,22 @@ use fred::{ cmd, prelude::*, - types::{XCapKind, XCapTrim, XReadResponse, XReadValue, XID}, + types::streams::{XCapKind, XCapTrim, XReadResponse, XReadValue, XID}, }; use std::{collections::HashMap, hash::Hash, time::Duration}; use tokio::time::sleep; type FakeExpectedValues = Vec>>; -async fn create_fake_group_and_stream(client: &RedisClient, key: &str) -> Result<(), RedisError> { +async fn create_fake_group_and_stream(client: &Client, key: &str) -> Result<(), Error> { client.xgroup_create(key, "group1", "$", true).await } async fn 
add_stream_entries( - client: &RedisClient, + client: &Client, key: &str, count: usize, -) -> Result<(Vec, FakeExpectedValues), RedisError> { +) -> Result<(Vec, FakeExpectedValues), Error> { let mut ids = Vec::with_capacity(count); let mut expected = Vec::with_capacity(count); for idx in 0 .. count { @@ -37,17 +37,17 @@ fn has_expected_value(expected: &FakeExpectedValues, actual: &FakeExpectedValues actual.iter().enumerate().fold(true, |b, (i, v)| b && v == &expected[i]) } -pub async fn should_xinfo_consumers(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let result: Result<(), RedisError> = client.xinfo_consumers("foo{1}", "group1").await; +pub async fn should_xinfo_consumers(client: Client, _: Config) -> Result<(), Error> { + let result: Result<(), Error> = client.xinfo_consumers("foo{1}", "group1").await; assert!(result.is_err()); create_fake_group_and_stream(&client, "foo{1}").await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; let consumers: Vec> = client.xinfo_consumers("foo{1}", "group1").await?; assert_eq!(consumers.len(), 1); assert_eq!(consumers[0].get("name"), Some(&"consumer1".to_owned())); - client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; let consumers: Vec> = client.xinfo_consumers("foo{1}", "group1").await?; assert_eq!(consumers.len(), 2); assert_eq!(consumers[0].get("name"), Some(&"consumer1".to_owned())); @@ -56,17 +56,17 @@ pub async fn should_xinfo_consumers(client: RedisClient, _: RedisConfig) -> Resu Ok(()) } -pub async fn should_xinfo_groups(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let result: Result<(), RedisError> = client.xinfo_groups("foo{1}").await; +pub async fn should_xinfo_groups(client: Client, _: Config) -> Result<(), Error> { + let result: Result<(), Error> = 
client.xinfo_groups("foo{1}").await; assert!(result.is_err()); create_fake_group_and_stream(&client, "foo{1}").await?; - let result: Vec> = client.xinfo_groups("foo{1}").await?; + let result: Vec> = client.xinfo_groups("foo{1}").await?; assert_eq!(result.len(), 1); assert_eq!(result[0].get("name"), Some(&"group1".into())); - client.xgroup_create("foo{1}", "group2", "$", true).await?; - let result: Vec> = client.xinfo_groups("foo{1}").await?; + let _: () = client.xgroup_create("foo{1}", "group2", "$", true).await?; + let result: Vec> = client.xinfo_groups("foo{1}").await?; assert_eq!(result.len(), 2); assert_eq!(result[0].get("name"), Some(&"group1".into())); assert_eq!(result[1].get("name"), Some(&"group2".into())); @@ -74,22 +74,22 @@ pub async fn should_xinfo_groups(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } -pub async fn should_xinfo_streams(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let result: Result<(), RedisError> = client.xinfo_stream("foo{1}", true, None).await; +pub async fn should_xinfo_streams(client: Client, _: Config) -> Result<(), Error> { + let result: Result<(), Error> = client.xinfo_stream("foo{1}", true, None).await; assert!(result.is_err()); create_fake_group_and_stream(&client, "foo{1}").await?; - let mut result: HashMap = client.xinfo_stream("foo{1}", true, None).await?; + let mut result: HashMap = client.xinfo_stream("foo{1}", true, None).await?; assert!(result.len() >= 6); - assert_eq!(result.get("length"), Some(&RedisValue::Integer(0))); + assert_eq!(result.get("length"), Some(&Value::Integer(0))); - let groups: [HashMap; 1] = result.remove("groups").unwrap().convert()?; - assert_eq!(groups[0].get("name"), Some(&RedisValue::from("group1"))); + let groups: [HashMap; 1] = result.remove("groups").unwrap().convert()?; + assert_eq!(groups[0].get("name"), Some(&Value::from("group1"))); Ok(()) } -pub async fn should_xadd_auto_id_to_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub 
async fn should_xadd_auto_id_to_a_stream(client: Client, _: Config) -> Result<(), Error> { let result: String = client.xadd("foo{1}", false, None, "*", ("a", "b")).await?; assert!(!result.is_empty()); @@ -98,7 +98,7 @@ pub async fn should_xadd_auto_id_to_a_stream(client: RedisClient, _: RedisConfig Ok(()) } -pub async fn should_xadd_manual_id_to_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xadd_manual_id_to_a_stream(client: Client, _: Config) -> Result<(), Error> { let result: String = client.xadd("foo{1}", false, None, "1-0", ("a", "b")).await?; assert_eq!(result, "1-0"); @@ -107,8 +107,8 @@ pub async fn should_xadd_manual_id_to_a_stream(client: RedisClient, _: RedisConf Ok(()) } -pub async fn should_xadd_with_cap_to_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client +pub async fn should_xadd_with_cap_to_a_stream(client: Client, _: Config) -> Result<(), Error> { + let _: () = client .xadd("foo{1}", false, ("MAXLEN", "=", 1), "*", ("a", "b")) .await?; @@ -117,18 +117,18 @@ pub async fn should_xadd_with_cap_to_a_stream(client: RedisClient, _: RedisConfi Ok(()) } -pub async fn should_xadd_nomkstream_to_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xadd_nomkstream_to_a_stream(client: Client, _: Config) -> Result<(), Error> { let result: Option = client.xadd("foo{1}", true, None, "*", ("a", "b")).await?; assert!(result.is_none()); create_fake_group_and_stream(&client, "foo{1}").await?; - client.xadd("foo{1}", true, None, "*", ("a", "b")).await?; + let _: () = client.xadd("foo{1}", true, None, "*", ("a", "b")).await?; let len: usize = client.xlen("foo{1}").await?; assert_eq!(len, 1); Ok(()) } -pub async fn should_xtrim_a_stream_approx_cap(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xtrim_a_stream_approx_cap(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, 
"foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; @@ -137,7 +137,7 @@ pub async fn should_xtrim_a_stream_approx_cap(client: RedisClient, _: RedisConfi let len: usize = client.xlen("foo{1}").await?; assert_eq!(len, 3 - deleted); - client.custom(cmd!("DEL"), vec!["foo{1}"]).await?; + let _: () = client.custom(cmd!("DEL"), vec!["foo{1}"]).await?; create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; let deleted: usize = client @@ -150,7 +150,7 @@ pub async fn should_xtrim_a_stream_approx_cap(client: RedisClient, _: RedisConfi Ok(()) } -pub async fn should_xtrim_a_stream_eq_cap(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xtrim_a_stream_eq_cap(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; @@ -159,7 +159,7 @@ pub async fn should_xtrim_a_stream_eq_cap(client: RedisClient, _: RedisConfig) - let len: usize = client.xlen("foo{1}").await?; assert_eq!(len, 1); - client.custom(cmd!("DEL"), vec!["foo{1}"]).await?; + let _: () = client.custom(cmd!("DEL"), vec!["foo{1}"]).await?; create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; let deleted: usize = client.xtrim("foo{1}", (XCapKind::MaxLen, XCapTrim::Exact, 1)).await?; @@ -170,7 +170,7 @@ pub async fn should_xtrim_a_stream_eq_cap(client: RedisClient, _: RedisConfig) - Ok(()) } -pub async fn should_xdel_one_id_in_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xdel_one_id_in_a_stream(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let (ids, _) = add_stream_entries(&client, "foo{1}", 2).await?; @@ -181,7 +181,7 @@ pub async fn should_xdel_one_id_in_a_stream(client: RedisClient, _: RedisConfig) Ok(()) } -pub async fn 
should_xdel_multiple_ids_in_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xdel_multiple_ids_in_a_stream(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let (ids, _) = add_stream_entries(&client, "foo{1}", 3).await?; @@ -192,7 +192,7 @@ pub async fn should_xdel_multiple_ids_in_a_stream(client: RedisClient, _: RedisC Ok(()) } -pub async fn should_xrange_no_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xrange_no_count(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let (_, expected) = add_stream_entries(&client, "foo{1}", 3).await?; @@ -201,7 +201,7 @@ pub async fn should_xrange_no_count(client: RedisClient, _: RedisConfig) -> Resu Ok(()) } -pub async fn should_xrange_values_no_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xrange_values_no_count(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let (ids, _) = add_stream_entries(&client, "foo{1}", 3).await?; @@ -211,7 +211,7 @@ pub async fn should_xrange_values_no_count(client: RedisClient, _: RedisConfig) Ok(()) } -pub async fn should_xrevrange_values_no_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xrevrange_values_no_count(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let (mut ids, _) = add_stream_entries(&client, "foo{1}", 3).await?; ids.reverse(); @@ -222,7 +222,7 @@ pub async fn should_xrevrange_values_no_count(client: RedisClient, _: RedisConfi Ok(()) } -pub async fn should_xrange_with_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xrange_with_count(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let 
(_, expected) = add_stream_entries(&client, "foo{1}", 3).await?; @@ -231,7 +231,7 @@ pub async fn should_xrange_with_count(client: RedisClient, _: RedisConfig) -> Re Ok(()) } -pub async fn should_xrevrange_no_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xrevrange_no_count(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let (_, mut expected) = add_stream_entries(&client, "foo{1}", 3).await?; expected.reverse(); @@ -241,7 +241,7 @@ pub async fn should_xrevrange_no_count(client: RedisClient, _: RedisConfig) -> R Ok(()) } -pub async fn should_xrevrange_with_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xrevrange_with_count(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let (_, mut expected) = add_stream_entries(&client, "foo{1}", 3).await?; expected.reverse(); @@ -251,7 +251,7 @@ pub async fn should_xrevrange_with_count(client: RedisClient, _: RedisConfig) -> Ok(()) } -pub async fn should_run_xlen_on_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_run_xlen_on_stream(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let len: usize = client.xlen("foo{1}").await?; assert_eq!(len, 0); @@ -262,7 +262,7 @@ pub async fn should_run_xlen_on_stream(client: RedisClient, _: RedisConfig) -> R Ok(()) } -pub async fn should_xread_map_one_key(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xread_map_one_key(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; @@ -276,7 +276,7 @@ pub async fn should_xread_map_one_key(client: RedisClient, _: RedisConfig) -> Re Ok(()) } -pub async fn should_xread_one_key_count_1(client: RedisClient, _: 
RedisConfig) -> Result<(), RedisError> { +pub async fn should_xread_one_key_count_1(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let (mut ids, mut expected) = add_stream_entries(&client, "foo{1}", 3).await?; let _ = ids.pop().unwrap(); @@ -287,7 +287,7 @@ pub async fn should_xread_one_key_count_1(client: RedisClient, _: RedisConfig) - expected.insert("foo{1}".into(), vec![most_recent_expected]); let result: HashMap>>> = client - .xread::(Some(1), None, "foo{1}", second_recent_id) + .xread::(Some(1), None, "foo{1}", second_recent_id) .await? .flatten_array_values(1) .convert()?; @@ -296,7 +296,7 @@ pub async fn should_xread_one_key_count_1(client: RedisClient, _: RedisConfig) - Ok(()) } -pub async fn should_xread_multiple_keys_count_2(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xread_multiple_keys_count_2(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; create_fake_group_and_stream(&client, "bar{1}").await?; let (foo_ids, foo_inner) = add_stream_entries(&client, "foo{1}", 3).await?; @@ -308,7 +308,7 @@ pub async fn should_xread_multiple_keys_count_2(client: RedisClient, _: RedisCon let ids: Vec = vec![foo_ids[0].as_str().into(), bar_ids[0].as_str().into()]; let result: HashMap>>> = client - .xread::(Some(2), None, vec!["foo{1}", "bar{1}"], ids) + .xread::(Some(2), None, vec!["foo{1}", "bar{1}"], ids) .await? 
.flatten_array_values(1) .convert()?; @@ -317,7 +317,7 @@ pub async fn should_xread_multiple_keys_count_2(client: RedisClient, _: RedisCon Ok(()) } -pub async fn should_xread_with_blocking(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xread_with_blocking(client: Client, _: Config) -> Result<(), Error> { let expected_id = "123456789-0"; create_fake_group_and_stream(&client, "foo{1}").await?; @@ -334,15 +334,15 @@ pub async fn should_xread_with_blocking(client: RedisClient, _: RedisConfig) -> add_client.wait_for_connect().await?; sleep(Duration::from_millis(500)).await; - add_client + let _: () = add_client .xadd("foo{1}", false, None, expected_id, ("count", 100)) .await?; add_client.quit().await?; - Ok::<(), RedisError>(()) + Ok::<(), Error>(()) }); let result: HashMap>>> = client - .xread::(None, Some(5000), "foo{1}", XID::Max) + .xread::(None, Some(5000), "foo{1}", XID::Max) .await? .flatten_array_values(1) .convert()?; @@ -351,36 +351,36 @@ pub async fn should_xread_with_blocking(client: RedisClient, _: RedisConfig) -> Ok(()) } -pub async fn should_xgroup_create_no_mkstream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let result: Result = client.xgroup_create("foo{1}", "group1", "$", false).await; +pub async fn should_xgroup_create_no_mkstream(client: Client, _: Config) -> Result<(), Error> { + let result: Result = client.xgroup_create("foo{1}", "group1", "$", false).await; assert!(result.is_err()); - client.xadd("foo{1}", false, None, "*", ("count", 1)).await?; - client.xgroup_create("foo{1}", "group1", "$", false).await?; + let _: () = client.xadd("foo{1}", false, None, "*", ("count", 1)).await?; + let _: () = client.xgroup_create("foo{1}", "group1", "$", false).await?; Ok(()) } -pub async fn should_xgroup_create_mkstream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.xgroup_create("foo{1}", "group1", "$", true).await?; +pub async fn should_xgroup_create_mkstream(client: 
Client, _: Config) -> Result<(), Error> { + let _: () = client.xgroup_create("foo{1}", "group1", "$", true).await?; let len: usize = client.xlen("foo{1}").await?; assert_eq!(len, 0); Ok(()) } -pub async fn should_xgroup_createconsumer(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xgroup_createconsumer(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let len: usize = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; assert_eq!(len, 1); - let consumers: Vec> = client.xinfo_consumers("foo{1}", "group1").await?; - assert_eq!(consumers[0].get("name").unwrap(), &RedisValue::from("consumer1")); - assert_eq!(consumers[0].get("pending").unwrap(), &RedisValue::from(0)); + let consumers: Vec> = client.xinfo_consumers("foo{1}", "group1").await?; + assert_eq!(consumers[0].get("name").unwrap(), &Value::from("consumer1")); + assert_eq!(consumers[0].get("pending").unwrap(), &Value::from(0)); Ok(()) } -pub async fn should_xgroup_delconsumer(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xgroup_delconsumer(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let len: usize = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; assert_eq!(len, 1); @@ -388,12 +388,12 @@ pub async fn should_xgroup_delconsumer(client: RedisClient, _: RedisConfig) -> R let len: usize = client.xgroup_delconsumer("foo{1}", "group1", "consumer1").await?; assert_eq!(len, 0); - let consumers: Vec> = client.xinfo_consumers("foo{1}", "group1").await?; + let consumers: Vec> = client.xinfo_consumers("foo{1}", "group1").await?; assert!(consumers.is_empty()); Ok(()) } -pub async fn should_xgroup_destroy(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xgroup_destroy(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, 
"foo{1}").await?; let len: usize = client.xgroup_destroy("foo{1}", "group1").await?; assert_eq!(len, 1); @@ -401,17 +401,17 @@ pub async fn should_xgroup_destroy(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -pub async fn should_xgroup_setid(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xgroup_setid(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; - client.xgroup_setid("foo{1}", "group1", "12345-0").await?; + let _: () = client.xgroup_setid("foo{1}", "group1", "12345-0").await?; Ok(()) } -pub async fn should_xreadgroup_one_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xreadgroup_one_stream(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; let result: XReadResponse = client .xreadgroup_map("group1", "consumer1", None, None, false, "foo{1}", ">") @@ -426,13 +426,13 @@ pub async fn should_xreadgroup_one_stream(client: RedisClient, _: RedisConfig) - Ok(()) } -pub async fn should_xreadgroup_multiple_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xreadgroup_multiple_stream(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; create_fake_group_and_stream(&client, "bar{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; let _ = add_stream_entries(&client, "bar{1}", 1).await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; - client.xgroup_createconsumer("bar{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = 
client.xgroup_createconsumer("bar{1}", "group1", "consumer1").await?; let result: XReadResponse = client .xreadgroup_map( @@ -458,9 +458,9 @@ pub async fn should_xreadgroup_multiple_stream(client: RedisClient, _: RedisConf Ok(()) } -pub async fn should_xreadgroup_block(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xreadgroup_block(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; let add_client = client.clone_new(); tokio::spawn(async move { @@ -468,9 +468,9 @@ pub async fn should_xreadgroup_block(client: RedisClient, _: RedisConfig) -> Res add_client.wait_for_connect().await?; sleep(Duration::from_secs(1)).await; - add_client.xadd("foo{1}", false, None, "*", ("count", 100)).await?; + let _: () = add_client.xadd("foo{1}", false, None, "*", ("count", 100)).await?; add_client.quit().await?; - Ok::<_, RedisError>(()) + Ok::<_, Error>(()) }); let mut result: XReadResponse = client @@ -486,10 +486,10 @@ pub async fn should_xreadgroup_block(client: RedisClient, _: RedisConfig) -> Res Ok(()) } -pub async fn should_xack_one_id(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xack_one_id(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 1).await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; let result: XReadResponse = client .xreadgroup_map("group1", "consumer1", None, None, false, "foo{1}", ">") @@ -503,10 +503,10 @@ pub async fn should_xack_one_id(client: RedisClient, _: RedisConfig) -> Result<( Ok(()) } -pub async fn should_xack_multiple_ids(client: RedisClient, _: RedisConfig) 
-> Result<(), RedisError> { +pub async fn should_xack_multiple_ids(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; let result: XReadResponse = client .xreadgroup_map("group1", "consumer1", None, None, false, "foo{1}", ">") @@ -520,11 +520,11 @@ pub async fn should_xack_multiple_ids(client: RedisClient, _: RedisConfig) -> Re Ok(()) } -pub async fn should_xclaim_one_id(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xclaim_one_id(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; let mut result: XReadResponse = client .xreadgroup_map("group1", "consumer1", Some(1), None, false, "foo{1}", ">") @@ -566,11 +566,11 @@ pub async fn should_xclaim_one_id(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_xclaim_multiple_ids(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xclaim_multiple_ids(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = 
client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; let mut result: XReadResponse = client .xreadgroup_map("group1", "consumer1", Some(2), None, false, "foo{1}", ">") @@ -618,11 +618,11 @@ pub async fn should_xclaim_multiple_ids(client: RedisClient, _: RedisConfig) -> Ok(()) } -pub async fn should_xclaim_with_justid(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xclaim_with_justid(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; let mut result: XReadResponse = client .xreadgroup_map("group1", "consumer1", Some(2), None, false, "foo{1}", ">") @@ -663,11 +663,11 @@ pub async fn should_xclaim_with_justid(client: RedisClient, _: RedisConfig) -> R Ok(()) } -pub async fn should_xautoclaim_default(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_xautoclaim_default(client: Client, _: Config) -> Result<(), Error> { create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; - client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; let mut result: XReadResponse = client .xreadgroup_map("group1", "consumer1", Some(2), None, false, "foo{1}", ">") diff --git a/tests/integration/timeseries/mod.rs b/tests/integration/timeseries/mod.rs index 648c49b8..13990a1b 100644 
--- a/tests/integration/timeseries/mod.rs +++ b/tests/integration/timeseries/mod.rs @@ -1,25 +1,21 @@ use bytes_utils::Str; use fred::{ - clients::RedisClient, - error::RedisError, + clients::Client, + error::Error, interfaces::*, - prelude::RedisResult, + prelude::FredResult, types::{ - Aggregator, - GetLabels, - RedisConfig, - RedisKey, - RedisValue, - Resp2TimeSeriesValues, - Resp3TimeSeriesValues, - Timestamp, + config::Config, + timeseries::{Aggregator, GetLabels, Resp2TimeSeriesValues, Resp3TimeSeriesValues, Timestamp}, + Key, + Value, }, }; use redis_protocol::resp3::types::RespVersion; use std::{collections::HashMap, time::Duration}; use tokio::time::sleep; -pub async fn should_ts_add_get_and_range(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_ts_add_get_and_range(client: Client, _: Config) -> Result<(), Error> { let first_timestamp: i64 = client.ts_add("foo", "*", 41.0, None, None, None, None, ()).await?; assert!(first_timestamp > 0); sleep(Duration::from_millis(5)).await; @@ -36,16 +32,16 @@ pub async fn should_ts_add_get_and_range(client: RedisClient, _: RedisConfig) -> Ok(()) } -pub async fn should_create_alter_and_del_timeseries(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.ts_create("foo{1}", None, None, None, None, ("a", "b")).await?; - client.ts_alter("foo{1}", None, None, None, ("b", "c")).await?; +pub async fn should_create_alter_and_del_timeseries(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.ts_create("foo{1}", None, None, None, None, ("a", "b")).await?; + let _: () = client.ts_alter("foo{1}", None, None, None, ("b", "c")).await?; Ok(()) } -pub async fn should_madd_and_mget(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.ts_create("foo{1}", None, None, None, None, ("a", "b")).await?; - client.ts_create("bar{1}", None, None, None, None, ("a", "b")).await?; +pub async fn should_madd_and_mget(client: Client, _: Config) -> 
Result<(), Error> { + let _: () = client.ts_create("foo{1}", None, None, None, None, ("a", "b")).await?; + let _: () = client.ts_create("bar{1}", None, None, None, None, ("a", "b")).await?; let values = vec![ ("foo{1}", 1, 1.1), @@ -95,7 +91,7 @@ pub async fn should_madd_and_mget(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_incr_and_decr(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_incr_and_decr(client: Client, _: Config) -> Result<(), Error> { // taken from the docs let timestamp: i64 = client .ts_incrby( @@ -137,30 +133,30 @@ pub async fn should_incr_and_decr(client: RedisClient, _: RedisConfig) -> Result Ok(()) } -pub async fn should_create_and_delete_rules(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client +pub async fn should_create_and_delete_rules(client: Client, _: Config) -> Result<(), Error> { + let _: () = client .ts_create("temp:TLV", None, None, None, None, [ ("type", "temp"), ("location", "TLV"), ]) .await?; - client + let _: () = client .ts_create("dailyAvgTemp:TLV", None, None, None, None, [ ("type", "temp"), ("location", "TLV"), ]) .await?; - client + let _: () = client .ts_createrule("temp:TLV", "dailyAvgTemp:TLV", (Aggregator::TWA, 86400000), None) .await?; - client.ts_deleterule("temp:TLV", "dailyAvgTemp:TLV").await?; + let _: () = client.ts_deleterule("temp:TLV", "dailyAvgTemp:TLV").await?; Ok(()) } -pub async fn should_madd_and_mrange(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.ts_create("foo{1}", None, None, None, None, ("a", "b")).await?; - client.ts_create("bar{1}", None, None, None, None, ("a", "b")).await?; +pub async fn should_madd_and_mrange(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.ts_create("foo{1}", None, None, None, None, ("a", "b")).await?; + let _: () = client.ts_create("bar{1}", None, None, None, None, ("a", "b")).await?; let values = vec![ ("foo{1}", 1, 1.1), @@ -255,7 +251,7 
@@ pub async fn should_madd_and_mrange(client: RedisClient, _: RedisConfig) -> Resu // // TODO add another TimeSeriesValues type alias for this? - let samples: HashMap, Vec, Vec<(i64, f64)>)> = client + let samples: HashMap, Vec, Vec<(i64, f64)>)> = client .ts_mrange( "-", "+", @@ -275,7 +271,7 @@ pub async fn should_madd_and_mrange(client: RedisClient, _: RedisConfig) -> Resu "foo{1}".to_string(), ( vec![("a".to_string(), "b".to_string())], - vec!["aggregators".as_bytes().into(), RedisValue::Array(vec![])], + vec!["aggregators".as_bytes().into(), Value::Array(vec![])], vec![(1, 1.1), (2, 2.2), (3, 3.3)], ), ); @@ -283,7 +279,7 @@ pub async fn should_madd_and_mrange(client: RedisClient, _: RedisConfig) -> Resu "bar{1}".to_string(), ( vec![("a".to_string(), "b".to_string())], - vec!["aggregators".as_bytes().into(), RedisValue::Array(vec![])], + vec!["aggregators".as_bytes().into(), Value::Array(vec![])], vec![(1, 1.2), (2, 2.3)], ), ); @@ -293,9 +289,9 @@ pub async fn should_madd_and_mrange(client: RedisClient, _: RedisConfig) -> Resu Ok(()) } -pub async fn should_madd_and_mrevrange(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.ts_create("foo{1}", None, None, None, None, ("a", "b")).await?; - client.ts_create("bar{1}", None, None, None, None, ("a", "b")).await?; +pub async fn should_madd_and_mrevrange(client: Client, _: Config) -> Result<(), Error> { + let _: () = client.ts_create("foo{1}", None, None, None, None, ("a", "b")).await?; + let _: () = client.ts_create("bar{1}", None, None, None, None, ("a", "b")).await?; let values = vec![ ("foo{1}", 1, 1.1), @@ -340,7 +336,7 @@ pub async fn should_madd_and_mrevrange(client: RedisClient, _: RedisConfig) -> R } else { // see the mrange test above for more info on this section - let samples: HashMap, Vec, Vec<(i64, f64)>)> = client + let samples: HashMap, Vec, Vec<(i64, f64)>)> = client .ts_mrevrange( "-", "+", @@ -360,7 +356,7 @@ pub async fn should_madd_and_mrevrange(client: RedisClient, _: 
RedisConfig) -> R "foo{1}".to_string(), ( vec![("a".to_string(), "b".to_string())], - vec!["aggregators".as_bytes().into(), RedisValue::Array(vec![])], + vec!["aggregators".as_bytes().into(), Value::Array(vec![])], vec![(3, 3.3), (2, 2.2), (1, 1.1)], ), ); @@ -368,7 +364,7 @@ pub async fn should_madd_and_mrevrange(client: RedisClient, _: RedisConfig) -> R "bar{1}".to_string(), ( vec![("a".to_string(), "b".to_string())], - vec!["aggregators".as_bytes().into(), RedisValue::Array(vec![])], + vec!["aggregators".as_bytes().into(), Value::Array(vec![])], vec![(2, 2.3), (1, 1.2)], ), ); diff --git a/tests/integration/tracking/mod.rs b/tests/integration/tracking/mod.rs index ba2b458c..4d82eb68 100644 --- a/tests/integration/tracking/mod.rs +++ b/tests/integration/tracking/mod.rs @@ -1,6 +1,6 @@ use fred::{ prelude::*, - types::{RedisKey, RespVersion}, + types::{Key, RespVersion}, }; use std::{ sync::{ @@ -13,12 +13,12 @@ use tokio::time::sleep; #[allow(dead_code)] #[cfg(feature = "i-keys")] -pub async fn should_invalidate_foo_resp3(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_invalidate_foo_resp3(client: Client, _: Config) -> Result<(), Error> { if client.protocol_version() == RespVersion::RESP2 { return Ok(()); } - let key: RedisKey = "foo{1}".into(); + let key: Key = "foo{1}".into(); let invalidated = Arc::new(AtomicBool::new(false)); let _invalidated = invalidated.clone(); @@ -32,12 +32,12 @@ pub async fn should_invalidate_foo_resp3(client: RedisClient, _: RedisConfig) -> }); client.start_tracking(None, false, false, false, false).await?; - client.get("foo{1}").await?; - client.incr("foo{1}").await?; + let _: () = client.get("foo{1}").await?; + let _: () = client.incr("foo{1}").await?; - client.mget(vec!["bar{1}", "baz{1}"]).await?; - client.mset(vec![("bar{1}", 1), ("baz{1}", 1)]).await?; - client.flushall(false).await?; + let _: () = client.mget(vec!["bar{1}", "baz{1}"]).await?; + let _: () = client.mset(vec![("bar{1}", 1), 
("baz{1}", 1)]).await?; + let _: () = client.flushall(false).await?; sleep(Duration::from_secs(1)).await; if invalidated.load(Ordering::Acquire) { @@ -49,12 +49,12 @@ pub async fn should_invalidate_foo_resp3(client: RedisClient, _: RedisConfig) -> #[allow(dead_code)] #[cfg(feature = "i-keys")] -pub async fn should_invalidate_foo_resp2_centralized(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { +pub async fn should_invalidate_foo_resp2_centralized(client: Client, _: Config) -> Result<(), Error> { if client.protocol_version() == RespVersion::RESP3 || client.is_clustered() { return Ok(()); } - let key: RedisKey = "foo{1}".into(); + let key: Key = "foo{1}".into(); let subscriber = client.clone_new(); subscriber.connect(); subscriber.wait_for_connect().await?; @@ -74,27 +74,26 @@ pub async fn should_invalidate_foo_resp2_centralized(client: RedisClient, _: Red let (_, subscriber_id) = subscriber .connection_ids() - .await .into_iter() .next() .expect("Failed to read subscriber connection ID"); - client + let _: () = client .client_tracking("on", Some(subscriber_id), None, false, false, false, false) .await?; - // verify that we get 2 keys in the invalidation message, or at least make sure that doesnt panic + // verify that we get 2 keys in the invalidation message, or at least make sure that doesn't panic // in resp2 this might take some changes to the pubsub parser if it doesn't work with an array as the message type // check pubsub messages with one key - client.get("foo{1}").await?; - client.incr("foo{1}").await?; + let _: () = client.get("foo{1}").await?; + let _: () = client.incr("foo{1}").await?; // check pubsub messages with an array of keys - client.mget(vec!["bar{1}", "baz{1}"]).await?; - client.mset(vec![("bar{1}", 1), ("baz{1}", 1)]).await?; + let _: () = client.mget(vec!["bar{1}", "baz{1}"]).await?; + let _: () = client.mset(vec![("bar{1}", 1), ("baz{1}", 1)]).await?; // check pubsub messages with a null key - client.flushall(false).await?; + 
let _: () = client.flushall(false).await?; sleep(Duration::from_secs(1)).await; if invalidated.load(Ordering::Acquire) { diff --git a/tests/integration/utils.rs b/tests/integration/utils.rs index af544e97..20c47333 100644 --- a/tests/integration/utils.rs +++ b/tests/integration/utils.rs @@ -5,32 +5,45 @@ #![allow(clippy::match_like_matches_macro)] use fred::{ - clients::RedisClient, - error::RedisError, + clients::Client, + error::Error, interfaces::*, types::{ + config::{ + ClusterDiscoveryPolicy, + Config, + ConnectionConfig, + PerformanceConfig, + ReconnectPolicy, + Server, + ServerConfig, + UnresponsiveConfig, + }, Builder, - ConnectionConfig, - PerformanceConfig, - ReconnectPolicy, - RedisConfig, - Server, - ServerConfig, - UnresponsiveConfig, + ConnectHandle, + InfoKind, }, }; use redis_protocol::resp3::types::RespVersion; -use std::{convert::TryInto, default::Default, env, fmt, fmt::Formatter, fs, future::Future, time::Duration}; +use std::{ + convert::TryInto, + default::Default, + env, + fmt, + fmt::{Debug, Formatter}, + fs, + future::Future, + time::Duration, +}; const RECONNECT_DELAY: u32 = 1000; -use fred::types::{ClusterDiscoveryPolicy, InfoKind}; #[cfg(any( feature = "enable-rustls", feature = "enable-native-tls", feature = "enable-rustls-ring" ))] -use fred::types::{TlsConfig, TlsConnector, TlsHostMapping}; +use fred::types::config::{TlsConfig, TlsConnector, TlsHostMapping}; #[cfg(feature = "enable-native-tls")] use tokio_native_tls::native_tls::{ Certificate as NativeTlsCertificate, @@ -192,7 +205,7 @@ struct TlsCreds { feature = "enable-native-tls", feature = "enable-rustls-ring" ))] -fn check_file_contents(value: &Vec, msg: &str) { +fn check_file_contents(value: &[u8], msg: &str) { if value.is_empty() { panic!("Invalid empty TLS file: {}", msg); } @@ -244,7 +257,7 @@ fn create_rustls_config() -> TlsConnector { let creds = read_tls_creds(); let mut root_store = RootCertStore::empty(); - let _ = root_store + root_store 
.add(creds.root_cert_der.clone().into()) .expect("Failed adding to rustls root cert store"); @@ -303,8 +316,8 @@ fn create_server_config(cluster: bool) -> ServerConfig { } } -fn create_normal_redis_config(cluster: bool, pipeline: bool, resp3: bool) -> (RedisConfig, PerformanceConfig) { - let config = RedisConfig { +fn create_normal_redis_config(cluster: bool, resp3: bool) -> (Config, PerformanceConfig) { + let config = Config { fail_fast: read_fail_fast_env(), server: create_server_config(cluster), version: if resp3 { RespVersion::RESP3 } else { RespVersion::RESP2 }, @@ -313,7 +326,6 @@ fn create_normal_redis_config(cluster: bool, pipeline: bool, resp3: bool) -> (Re ..Default::default() }; let perf = PerformanceConfig { - auto_pipeline: pipeline, default_command_timeout: Duration::from_secs(20), ..Default::default() }; @@ -326,30 +338,30 @@ fn create_normal_redis_config(cluster: bool, pipeline: bool, resp3: bool) -> (Re feature = "enable-native-tls", feature = "enable-rustls-ring" )))] -fn create_redis_config(cluster: bool, pipeline: bool, resp3: bool) -> (RedisConfig, PerformanceConfig) { - create_normal_redis_config(cluster, pipeline, resp3) +fn create_redis_config(cluster: bool, resp3: bool) -> (Config, PerformanceConfig) { + create_normal_redis_config(cluster, resp3) } #[cfg(all( feature = "enable-native-tls", any(feature = "enable-rustls", feature = "enable-rustls-ring") ))] -fn create_redis_config(cluster: bool, pipeline: bool, resp3: bool) -> (RedisConfig, PerformanceConfig) { +fn create_redis_config(cluster: bool, resp3: bool) -> (Config, PerformanceConfig) { // if both are enabled then don't use either since all the tests assume one or the other - create_normal_redis_config(cluster, pipeline, resp3) + create_normal_redis_config(cluster, resp3) } #[cfg(all( any(feature = "enable-rustls", feature = "enable-rustls-ring"), not(feature = "enable-native-tls") ))] -fn create_redis_config(cluster: bool, pipeline: bool, resp3: bool) -> (RedisConfig, 
PerformanceConfig) { +fn create_redis_config(cluster: bool, resp3: bool) -> (Config, PerformanceConfig) { if !read_ci_tls_env() { - return create_normal_redis_config(cluster, pipeline, resp3); + return create_normal_redis_config(cluster, resp3); } debug!("Creating rustls test config..."); - let config = RedisConfig { + let config = Config { fail_fast: read_fail_fast_env(), server: create_server_config(cluster), version: if resp3 { RespVersion::RESP3 } else { RespVersion::RESP2 }, @@ -362,7 +374,6 @@ fn create_redis_config(cluster: bool, pipeline: bool, resp3: bool) -> (RedisConf ..Default::default() }; let perf = PerformanceConfig { - auto_pipeline: pipeline, default_command_timeout: Duration::from_secs(20), ..Default::default() }; @@ -374,13 +385,13 @@ fn create_redis_config(cluster: bool, pipeline: bool, resp3: bool) -> (RedisConf feature = "enable-native-tls", not(any(feature = "enable-rustls", feature = "enable-rustls-ring")) ))] -fn create_redis_config(cluster: bool, pipeline: bool, resp3: bool) -> (RedisConfig, PerformanceConfig) { +fn create_redis_config(cluster: bool, resp3: bool) -> (Config, PerformanceConfig) { if !read_ci_tls_env() { - return create_normal_redis_config(cluster, pipeline, resp3); + return create_normal_redis_config(cluster, resp3); } debug!("Creating native-tls test config..."); - let config = RedisConfig { + let config = Config { fail_fast: read_fail_fast_env(), server: create_server_config(cluster), version: if resp3 { RespVersion::RESP3 } else { RespVersion::RESP2 }, @@ -393,7 +404,6 @@ fn create_redis_config(cluster: bool, pipeline: bool, resp3: bool) -> (RedisConf ..Default::default() }; let perf = PerformanceConfig { - auto_pipeline: pipeline, default_command_timeout: Duration::from_secs(20), ..Default::default() }; @@ -401,7 +411,7 @@ fn create_redis_config(cluster: bool, pipeline: bool, resp3: bool) -> (RedisConf (config, perf) } -async fn flushall_between_tests(client: &RedisClient) -> Result<(), RedisError> { +async fn 
flushall_between_tests(client: &Client) -> Result<(), Error> { if should_flushall_between_tests() { client.flushall_cluster().await } else { @@ -409,14 +419,21 @@ async fn flushall_between_tests(client: &RedisClient) -> Result<(), RedisError> } } -pub async fn run_sentinel(func: F, pipeline: bool, resp3: bool) +async fn check_panic(client: &Client, jh: ConnectHandle, err: Error) { + println!("Checking panic after: {:?}", err); + let _ = client.quit().await; + jh.await.unwrap().unwrap(); + panic!("{:?}", err); +} + +pub async fn run_sentinel(func: F, resp3: bool) where - F: Fn(RedisClient, RedisConfig) -> Fut, - Fut: Future>, + F: Fn(Client, Config) -> Fut, + Fut: Future>, { let policy = ReconnectPolicy::new_constant(300, RECONNECT_DELAY); let connection = ConnectionConfig::default(); - let config = RedisConfig { + let config = Config { fail_fast: read_fail_fast_env(), version: if resp3 { RespVersion::RESP3 } else { RespVersion::RESP2 }, server: ServerConfig::Sentinel { @@ -430,29 +447,31 @@ where password: Some(read_redis_password()), ..Default::default() }; - let perf = PerformanceConfig { - auto_pipeline: pipeline, - ..Default::default() - }; - let client = RedisClient::new(config.clone(), Some(perf), Some(connection), Some(policy)); + let perf = PerformanceConfig::default(); + let client = Client::new(config.clone(), Some(perf), Some(connection), Some(policy)); let _client = client.clone(); - let _jh = client.connect(); + let jh = client.connect(); client.wait_for_connect().await.expect("Failed to connect client"); - flushall_between_tests(&client).await.expect("Failed to flushall"); - func(_client, config.clone()).await.expect("Failed to run test"); - let _ = client.quit().await; + if let Err(err) = flushall_between_tests(&client).await { + check_panic(&client, jh, err).await; + } else if let Err(err) = func(_client, config.clone()).await { + check_panic(&client, jh, err).await; + } else { + let _ = client.quit().await; + jh.await.unwrap().unwrap(); + } } -pub 
async fn run_cluster(func: F, pipeline: bool, resp3: bool) +pub async fn run_cluster(func: F, resp3: bool) where - F: Fn(RedisClient, RedisConfig) -> Fut, - Fut: Future>, + F: Fn(Client, Config) -> Fut, + Fut: Future>, { let (policy, cmd_attempts, fail_fast) = reconnect_settings(); let mut connection = ConnectionConfig::default(); - let (mut config, perf) = create_redis_config(true, pipeline, resp3); + let (mut config, perf) = create_redis_config(true, resp3); connection.max_command_attempts = cmd_attempts; connection.max_redirections = 10; connection.unresponsive = UnresponsiveConfig { @@ -461,29 +480,34 @@ where }; config.fail_fast = fail_fast; - let client = RedisClient::new(config.clone(), Some(perf), Some(connection), policy); + let client = Client::new(config.clone(), Some(perf), Some(connection), policy); let _client = client.clone(); - let _jh = client.connect(); + let jh = client.connect(); client.wait_for_connect().await.expect("Failed to connect client"); - flushall_between_tests(&client).await.expect("Failed to flushall"); - func(_client, config.clone()).await.expect("Failed to run test"); - let _ = client.quit().await; + if let Err(err) = flushall_between_tests(&client).await { + check_panic(&client, jh, err).await; + } else if let Err(err) = func(_client, config.clone()).await { + check_panic(&client, jh, err).await; + } else { + let _ = client.quit().await; + jh.await.unwrap().unwrap(); + } } -pub async fn run_centralized(func: F, pipeline: bool, resp3: bool) +pub async fn run_centralized(func: F, resp3: bool) where - F: Fn(RedisClient, RedisConfig) -> Fut, - Fut: Future>, + F: Fn(Client, Config) -> Fut, + Fut: Future>, { if should_use_sentinel_config() { - return run_sentinel(func, pipeline, resp3).await; + return run_sentinel(func, resp3).await; } let (policy, cmd_attempts, fail_fast) = reconnect_settings(); let mut connection = ConnectionConfig::default(); - let (mut config, perf) = create_redis_config(false, pipeline, resp3); + let (mut config, 
perf) = create_redis_config(false, resp3); connection.max_command_attempts = cmd_attempts; connection.unresponsive = UnresponsiveConfig { max_timeout: Some(Duration::from_secs(10)), @@ -491,19 +515,24 @@ where }; config.fail_fast = fail_fast; - let client = RedisClient::new(config.clone(), Some(perf), Some(connection), policy); + let client = Client::new(config.clone(), Some(perf), Some(connection), policy); let _client = client.clone(); - let _jh = client.connect(); + let jh = client.connect(); client.wait_for_connect().await.expect("Failed to connect client"); - flushall_between_tests(&client).await.expect("Failed to flushall"); - func(_client, config.clone()).await.expect("Failed to run test"); - let _ = client.quit().await; + if let Err(err) = flushall_between_tests(&client).await { + check_panic(&client, jh, err).await; + } else if let Err(err) = func(_client, config.clone()).await { + check_panic(&client, jh, err).await; + } else { + let _ = client.quit().await; + jh.await.unwrap().unwrap(); + } } /// Check whether the server is Valkey. -pub async fn check_valkey(client: &RedisClient) -> bool { +pub async fn check_valkey(client: &Client) -> bool { let info: String = match client.info(Some(InfoKind::Server)).await { Ok(val) => val, Err(e) => { @@ -526,52 +555,26 @@ macro_rules! 
centralized_test_panic( ($module:tt, $name:tt) => { #[cfg(not(any(feature = "enable-rustls", feature = "enable-native-tls", feature = "enable-rustls-ring")))] mod $name { - mod resp2 { - #[tokio::test(flavor = "multi_thread")] - #[should_panic] - async fn pipelined() { - if crate::integration::utils::read_ci_tls_env() { - panic!(""); - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_centralized(crate::integration::$module::$name, true, false).await; + #[tokio::test(flavor = "multi_thread")] + #[should_panic] + async fn resp2() { + if crate::integration::utils::read_ci_tls_env() { + panic!(""); } - #[tokio::test(flavor = "multi_thread")] - #[should_panic] - async fn no_pipeline() { - if crate::integration::utils::read_ci_tls_env() { - panic!(""); - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_centralized(crate::integration::$module::$name, false, false).await; - } + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_centralized(crate::integration::$module::$name, false).await; } - mod resp3 { - #[tokio::test(flavor = "multi_thread")] - #[should_panic] - async fn pipelined() { - if crate::integration::utils::read_ci_tls_env() { - panic!(""); - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_centralized(crate::integration::$module::$name, true, true).await; + #[tokio::test(flavor = "multi_thread")] + #[should_panic] + async fn resp3() { + if crate::integration::utils::read_ci_tls_env() { + panic!(""); } - #[tokio::test(flavor = "multi_thread")] - #[should_panic] - async fn no_pipeline() { - if crate::integration::utils::read_ci_tls_env() { - panic!(""); - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_centralized(crate::integration::$module::$name, false, true).await; - } + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_centralized(crate::integration::$module::$name, true).await; } } } @@ -581,53 
+584,27 @@ macro_rules! cluster_test_panic( ($module:tt, $name:tt) => { mod $name { #[cfg(not(any(feature = "i-redis-stack", feature = "unix-sockets")))] - mod resp2 { - #[tokio::test(flavor = "multi_thread")] - #[should_panic] - async fn pipelined() { - if crate::integration::utils::should_use_sentinel_config() { - panic!(""); - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_cluster(crate::integration::$module::$name, true, false).await; + #[tokio::test(flavor = "multi_thread")] + #[should_panic] + async fn resp2() { + if crate::integration::utils::should_use_sentinel_config() { + panic!(""); } - #[tokio::test(flavor = "multi_thread")] - #[should_panic] - async fn no_pipeline() { - if crate::integration::utils::should_use_sentinel_config() { - panic!(""); - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_cluster(crate::integration::$module::$name, false, false).await; - } + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_cluster(crate::integration::$module::$name, false).await; } #[cfg(not(any(feature = "i-redis-stack", feature = "unix-sockets")))] - mod resp3 { - #[tokio::test(flavor = "multi_thread")] - #[should_panic] - async fn pipelined() { - if crate::integration::utils::should_use_sentinel_config() { - panic!(""); - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_cluster(crate::integration::$module::$name, true, true).await; + #[tokio::test(flavor = "multi_thread")] + #[should_panic] + async fn resp3() { + if crate::integration::utils::should_use_sentinel_config() { + panic!(""); } - #[tokio::test(flavor = "multi_thread")] - #[should_panic] - async fn no_pipeline() { - if crate::integration::utils::should_use_sentinel_config() { - panic!(""); - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_cluster(crate::integration::$module::$name, false, true).await; - } + let _ = pretty_env_logger::try_init(); + 
crate::integration::utils::run_cluster(crate::integration::$module::$name, true).await; } } } @@ -637,48 +614,24 @@ macro_rules! centralized_test( ($module:tt, $name:tt) => { #[cfg(not(any(feature = "enable-rustls", feature = "enable-native-tls", feature = "enable-rustls-ring")))] mod $name { - mod resp2 { - #[tokio::test(flavor = "multi_thread")] - async fn pipelined() { - if crate::integration::utils::read_ci_tls_env() { - return; - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_centralized(crate::integration::$module::$name, true, false).await; + #[tokio::test(flavor = "multi_thread")] + async fn resp2() { + if crate::integration::utils::read_ci_tls_env() { + return; } - #[tokio::test(flavor = "multi_thread")] - async fn no_pipeline() { - if crate::integration::utils::read_ci_tls_env() { - return; - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_centralized(crate::integration::$module::$name, false, false).await; - } + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_centralized(crate::integration::$module::$name, false).await; } - mod resp3 { - #[tokio::test(flavor = "multi_thread")] - async fn pipelined() { - if crate::integration::utils::read_ci_tls_env() { - return; - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_centralized(crate::integration::$module::$name, true, true).await; + #[tokio::test(flavor = "multi_thread")] + async fn resp3() { + if crate::integration::utils::read_ci_tls_env() { + return; } - #[tokio::test(flavor = "multi_thread")] - async fn no_pipeline() { - if crate::integration::utils::read_ci_tls_env() { - return; - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_centralized(crate::integration::$module::$name, false, true).await; - } + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_centralized(crate::integration::$module::$name, true).await; } } } @@ -688,49 +641,25 @@ 
macro_rules! cluster_test( ($module:tt, $name:tt) => { mod $name { #[cfg(not(any(feature = "i-redis-stack", feature = "unix-sockets")))] - mod resp2 { - #[tokio::test(flavor = "multi_thread")] - async fn pipelined() { - if crate::integration::utils::should_use_sentinel_config() { - return; - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_cluster(crate::integration::$module::$name, true, false).await; + #[tokio::test(flavor = "multi_thread")] + async fn resp2() { + if crate::integration::utils::should_use_sentinel_config() { + return; } - #[tokio::test(flavor = "multi_thread")] - async fn no_pipeline() { - if crate::integration::utils::should_use_sentinel_config() { - return; - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_cluster(crate::integration::$module::$name, false, false).await; - } + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_cluster(crate::integration::$module::$name, false).await; } #[cfg(not(any(feature = "i-redis-stack", feature = "unix-sockets")))] - mod resp3 { - #[tokio::test(flavor = "multi_thread")] - async fn pipelined() { - if crate::integration::utils::should_use_sentinel_config() { - return; - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_cluster(crate::integration::$module::$name, true, true).await; + #[tokio::test(flavor = "multi_thread")] + async fn resp3() { + if crate::integration::utils::should_use_sentinel_config() { + return; } - #[tokio::test(flavor = "multi_thread")] - async fn no_pipeline() { - if crate::integration::utils::should_use_sentinel_config() { - return; - } - - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_cluster(crate::integration::$module::$name, false, true).await; - } + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_cluster(crate::integration::$module::$name, true).await; } } } @@ -738,8 +667,8 @@ macro_rules! cluster_test( macro_rules! 
return_err( ($($arg:tt)*) => { { - return Err(fred::error::RedisError::new( - fred::error::RedisErrorKind::Unknown, format!($($arg)*) + return Err(fred::error::Error::new( + fred::error::ErrorKind::Unknown, format!($($arg)*) )); } } ); diff --git a/tests/runners/docker-bash.sh b/tests/runners/docker-bash.sh index 680b2d26..44db6dbf 100755 --- a/tests/runners/docker-bash.sh +++ b/tests/runners/docker-bash.sh @@ -7,7 +7,7 @@ docker-compose -f tests/docker/compose/cluster-tls.yml \ -f tests/docker/compose/cluster.yml \ -f tests/docker/compose/sentinel.yml \ -f tests/docker/compose/redis-stack.yml \ - -f tests/docker/compose/valkey-centralized.yml \ + -f tests/docker/compose/centralized.yml \ -f tests/docker/compose/valkey-cluster.yml \ - -f tests/docker/compose/base.yml run -u $(id -u ${USER}):$(id -g ${USER}) --rm debug + -f tests/docker/compose/base.yml run --rm debug