diff --git a/.circleci/Dockerfile.sentinel b/.circleci/Dockerfile.sentinel index a56ebfd6..da8b54d4 100644 --- a/.circleci/Dockerfile.sentinel +++ b/.circleci/Dockerfile.sentinel @@ -13,6 +13,7 @@ ARG REDIS_VERSION ARG REDIS_USERNAME ARG REDIS_PASSWORD ARG REDIS_SENTINEL_PASSWORD +ARG FRED_CI_NEXTEST # try to make the new container look like the host WORKDIR /home/circleci/project @@ -25,5 +26,6 @@ RUN .circleci/install_redis_cli.sh # For debugging RUN cargo --version && rustc --version +RUN cargo install cargo-nextest CMD tests/runners/sentinel-features.sh \ No newline at end of file diff --git a/.circleci/config.yml b/.circleci/config.yml index 3b276e5f..989a1a78 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -14,6 +14,9 @@ jobs: - run: name: Install redis cluster command: tests/scripts/install_redis_clustered.sh + - run: + name: Install cargo-nextest + command: tests/scripts/install_cargo_nextest.sh - run: name: Clear the cargo git cache command: rm -rf ~/.cargo/git/* && rm -rf ~/.cargo/registry/cache/* @@ -34,6 +37,9 @@ jobs: - run: name: Install redis cluster command: tests/scripts/install_redis_clustered.sh + - run: + name: Install cargo-nextest + command: tests/scripts/install_cargo_nextest.sh - run: name: Clear the cargo git cache command: rm -rf ~/.cargo/git/* && rm -rf ~/.cargo/registry/cache/* @@ -51,6 +57,9 @@ jobs: - run: name: Install redis cluster command: tests/scripts/install_redis_clustered.sh + - run: + name: Install cargo-nextest + command: tests/scripts/install_cargo_nextest.sh - run: name: Clear the cargo git cache command: rm -rf ~/.cargo/git/* && rm -rf ~/.cargo/registry/cache/* @@ -74,6 +83,17 @@ jobs: - run: name: Install and run sentinel tests command: docker-compose -f tests/sentinel-docker-compose.yml -f .circleci/sentinel-compose.yml run sentinel-tests + test-docs: + docker: + - image: cimg/rust:1.57.0 + steps: + - checkout + - run: + name: Install nightly + command: rustup install nightly + - run: + name: Build 
documentation + command: tests/doc.sh workflows: version: 2 @@ -82,4 +102,5 @@ workflows: - test-default - test-all-features - test-no-features - - test-sentinel \ No newline at end of file + - test-sentinel + - test-docs \ No newline at end of file diff --git a/.circleci/sentinel-compose.yml b/.circleci/sentinel-compose.yml index f43b4cbb..256cada0 100644 --- a/.circleci/sentinel-compose.yml +++ b/.circleci/sentinel-compose.yml @@ -15,6 +15,7 @@ services: REDIS_USERNAME: "${REDIS_USERNAME}" REDIS_PASSWORD: "${REDIS_PASSWORD}" REDIS_SENTINEL_PASSWORD: "${REDIS_SENTINEL_PASSWORD}" + FRED_CI_NEXTEST: "${FRED_CI_NEXTEST}" networks: - app-tier environment: @@ -22,4 +23,5 @@ services: REDIS_VERSION: "${REDIS_VERSION}" REDIS_USERNAME: "${REDIS_USERNAME}" REDIS_PASSWORD: "${REDIS_PASSWORD}" - REDIS_SENTINEL_PASSWORD: "${REDIS_SENTINEL_PASSWORD}" \ No newline at end of file + REDIS_SENTINEL_PASSWORD: "${REDIS_SENTINEL_PASSWORD}" + FRED_CI_NEXTEST: "${FRED_CI_NEXTEST}" \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index d0931cc8..11051e16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,31 @@ +TLDR +==== + +* Versions 3.x were focused on upgrading to Tokio 1.x and async/await +* Versions 4.x were focused on ergonomics and testing +* Versions 5.x are focused on feature parity with newer Redis features (streams, RESP3, etc) +* Versions 6.x will be focused on performance. + +## 5.0.0-beta.1 + +* Rewrite the [protocol parser](https://github.com/aembke/redis-protocol.rs) so it can decode frames without moving or copying the underlying bytes +* Change most command implementations to avoid unnecessary allocations when using static str slices +* Rewrite the public interface to use different traits for different parts of the redis interface +* Relax some restrictions on certain commands being used in a transaction +* Implement the Streams interface (XADD, XREAD, etc) +* RESP3 support +* Minor perf improvements via the removal of some locks... 
+* Minor perf regressions from workarounds required to use [async functions with traits](https://smallcultfollowing.com/babysteps/blog/2019/10/26/async-fn-in-traits-are-hard/). In the end it's a wash. +* Move most perf configuration options from `globals` to client-specific config structs +* Add backpressure configuration options to the client config struct +* Fix bugs that can occur when using non-UTF8 byte arrays as keys +* Add the `serde-json` feature +* Handle more complicated failure modes with Redis clusters +* Add a more robust and specialized pubsub subscriber client +* Ergonomics improvements on the public interfaces +* Improve docs +* More tests + ## 4.3.2 * Fix https://github.com/aembke/fred.rs/issues/27 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8d5ec684..5f21d6c7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,45 +11,23 @@ This document gives some background on how the library is structured and how to ## TODO List -* Streams -* [WIP] RESP3 support (see [redis-protocol](https://crates.io/crates/redis-protocol)). -* Redis version 7.x commands -* Mocking layer -* Gate commands unique to a particular Redis version behind build time features. -* [WIP] Support custom DNS resolvers on the client. - -If you'd like to contribute to any of the above features feel free to reach out - -### Next Major Release - -The next major release (5.0.0) will include the following: - -* RESP3 support (this will result in breaking changes to nearly all response types due to value attributes being added) -* Move several global config options to the `RedisConfig` struct. Currently there are some use cases where current global options would work better if they were client-specific -* Improved error types and messages -* Remove or collapse several compile-time features. For example, the `sentinel-auth` feature will become the default interface, etc. 
-* Replace some configuration struct locks with `ArcSwap` -* Collapse the different pool types to one pool type that can dynamically scale while supporting client use via the `Deref` trait. -* Switch from `Arc` to `ArcStr` -* Publish benchmarks and run them during CI. The closest thing to that currently is in the [pipeline_test](bin/pipeline_test) module. -* Lots of code cleanup and refactoring. - -In addition, in 5.1.0 the [streams](https://redis.io/topics/streams-intro) interface will be added. This will likely include the following: - -* A lower level interface to use the X* interface directly. -* An optional, higher level client interface to manage the strange ways that stream subscriptions can interact with reconnect/retry. This will likely look a lot like a Kafka client. - -Finally, the 5.2.0 release will add [client tracking](https://redis.io/topics/client-side-caching) support built into the client (behind a new feature). +* Support custom DNS resolvers on the client. +* Add a FF that uses rustls instead of tokio-native-tls +* Add a FF for redis v7 changes (GET, XAUTOCLAIM, etc) +* Any missing commands. +* Support unix domain sockets +* Switch to `ArcStr` from `Arc` for map identifiers in the multiplexer. +* General cleanup and refactoring. A lot of the lower level logic was written before async/await, before `impl Trait`, and before NLLs. It could certainly be more modern and generic. ## Design This section covers some useful design considerations and assumptions that went into this module. * Debugging Redis issues late at night is not fun. If you find yourself adding log lines that help debug an issue please clean them up and leave them in the code. The one exception is that logs should **never** include potentially sensitive user data (i.e. don't commit changes that log full requests or responses). The `network-logs` feature can enable sensitive logs if needed. -* The `RedisClient` struct needs to be `Send + Sync` to work effectively with Tokio. 
-* The `RedisClient` struct should be fast and cheap to `Clone`. +* Any client struct needs to be `Send + Sync` to work effectively with Tokio. +* Any client struct should be fast and cheap to `Clone`. * The primary command interfaces should be as flexible as possible via use of `Into` and `TryInto` for arguments. -* Assume nearly any command might be used in the context of a transaction, and so it could return a `QUEUED` response even if the docs only mention bulk strings, arrays, etc. There are some exceptions to this (blocking commands, etc) where return values could be typed to exactly match the rust-equivalent type of the return value, but generally speaking every command should return a `RedisValue`. +* Assume nearly any command might be used in the context of a transaction, and so it could return a `QUEUED` response even if the docs only mention bulk strings, arrays, etc. There are some exceptions to this where return values could be typed to exactly match the rust-equivalent type of the return value, but generally speaking every command should return a `RedisValue`. There are other Redis libraries for Rust that have different goals, but the main goal of this library is to provide callers with a high level interface that abstracts away everything to do with safe and reliable connection management. This also includes some optional features to automatically handle common use cases around error handling, reconnection & backoff, retry, metrics, etc. @@ -66,10 +44,12 @@ to create a [Framed](https://docs.rs/tokio-util/0.6.7/tokio_util/codec/struct.Fr * If TLS features are disabled the TLS type aliases become references to the TCP types. * The writer half exposes 2 functions for sending frames to the socket: [feed](https://docs.rs/futures/0.3.16/futures/sink/trait.SinkExt.html#method.feed) and [send](https://docs.rs/futures/0.3.16/futures/sink/trait.SinkExt.html#method.send). `send` flushes the socket and `feed` does not. 
The client will use `send` under any of the following conditions: 1. There are no queued commands following the current command. - 2. The [global max number of fed commands](src/globals.rs) was reached. In this case the client will use `send` once and reset the feed counter. + 2. The max number of fed commands (`max_feed_count`) was reached. In this case the client will use `send` once and reset the feed counter. 3. The current command is QUIT or SHUTDOWN. 4. The current command ends a transaction. 5. The client has pipelining disabled. + +**All frames are automatically converted to RESP3 frames, even in RESP2 mode, to provide a single interface for callers to parse responses.** This works because RESP3 is a superset of RESP2 and the type conversion logic accounts for the different possible representations of most data types. #### Clustered Connections @@ -113,7 +93,7 @@ In order to support use cases where a client may switch between these states at The `Multiplexer` struct stores the following state: * A connection map that maps server IDs to `Framed` sinks, or just one `Framed` sink when connected to a centralized Redis deployment. -* A map that maps server IDs to a queue of in-flight commands (`VecDeque`). This is a different map/queue because it has different locking requirements than the connection map. +* A map that maps server IDs to a queue of in-flight commands (`VecDeque`). This is a different map/queue because it has different locking requirements than the connection map. This is the only lock that should see any real contention, but it is a requirement to implement pipelining. * A [broadcast](https://docs.rs/tokio/1.9.0/tokio/sync/broadcast/index.html) sender used to broadcast connection issues from either the reader or writer half of the socket. * An instance of a `ClusterKeyCache` used to route commands to specific nodes in the cluster, if necessary. 
* An `Arc`, used to communicate messages back to the client when a connection dies, state changes, pubsub messages are received, etc. @@ -128,7 +108,7 @@ When the client sends a command to the server the following operations occur: 1. The client prepares the command, creating a `Vec` array of arguments. 2. The client attaches a [oneshot](https://docs.rs/tokio/1.9.0/tokio/sync/oneshot/index.html) sender to the command on which the response will be sent. -3. The client acquires a _read_ lock on the command channel and writes the command to this channel. +3. The client sends the command to the `command_tx` sender channel on the `RedisClientInner` struct. 4. The client calls `await` on the receiver half of the oneshot channel from step 2. 5. Some time later the `Multiplexer` receives the command from the command stream running in a separate Tokio task. 6. The `Multiplexer` checks the command's flags to determine if it makes sense to send in the current connection context. @@ -151,7 +131,7 @@ Once a connection is established the `Multiplexer` does the following: 1. Split the connection. The writer half is covered above. 2. Spawn a task with access to the reader half, a reference to the server ID to which the reader half is connected, and a shallow clone of the `Multiplexer`. -3. Convert the reader half to a `Stream`, calling [try_fold](https://docs.rs/futures/0.3.16/futures/stream/struct.TryFold.html) on it in the process. While this does mean the stream is processed in series the reader task never `awaits` a future so there wouldn't be any benefit of processing the stream concurrently on an event loop. By processing the stream in series it also makes it very easy to handle situations where the command should be retried, or reconnection needs to occur, since the reader task can just put the command back at the front of the in-flight queue without worrying about another task having popped from the queue in the meantime. +3. 
Convert the reader half to a `Stream`, calling [try_fold](https://docs.rs/futures/0.3.16/futures/stream/struct.TryFold.html) on it in the process. While this does mean the stream is processed in series the reader task never `awaits` a future so there wouldn't be any real benefit of processing the stream concurrently on an event loop. By processing the stream in series it also makes it very easy to handle situations where the command should be retried, or reconnection needs to occur, since the reader task can just put the command back at the front of the in-flight queue without worrying about another task having popped from the queue in the meantime. Inside the `try_fold` loop the reader task does the following: @@ -235,23 +215,48 @@ If a command should not work inside a transaction then the command should use th This section will cover how to add new commands to the client. +There are usually only 2 files that require modifications to add a command, although certain commands may require modifying 3 or 4 files. + +1. The appropriate [client](src/clients) file sometimes requires a line to implement a new interface trait. This is usually only the case when starting implementing the first command in a new command category. +2. The [interface file](src/commands/interfaces) almost always requires changes to implement the generic interface to the command. +3. The [implementation file](src/commands/impls) almost always requires changes to implement the actual command logic. +4. The [protocol types](src/protocol/types.rs) file sometimes requires changes to add a new variant to the `RedisCommandKind` enum. + ## New Commands When adding new commands a few new things often need to be added to the [protocol types](src/protocol/types.rs) file. -1. Add a variant to the `RedisCommandKind` enum for the command. For most commands this variant will be empty. +1. Add a variant to the `RedisCommandKind` enum for the command. For most commands this variant will be empty. 
However, if there are any special flags or state needed by the command that should go inside this new variant declaration. 2. Add the string representation of the new variant to the `to_str_debug` function. This is what will be used in tracing fields and log lines. 3. Add the first word of the string representation of the command to the `cmd_str` function. 4. If the command is a compound command add the subcommand string to the `subcommand_str` function. If not then skip this step. 5. If the command is a blocking command add it to the `is_blocking_command` function's match statement. +6. If the command uses a unique key structure, such as a set of keys at the end of the command args (like `XREAD`) it may be necessary to change the `custom_key_slot` function to account for this. This is very rare though since almost every command takes the key as the first argument. ## Command Files -Commands are organized in the [commands](src/commands) folder by their category. +Commands are organized in two folders in the [commands](src/commands) folder. + +The trait declarations exist in the [interfaces](src/commands/interfaces) folder by category, and the actual command implementation exists in the [impl](src/commands/impls) folder by category. The `interfaces` file often calls the associated function from the `impls` file. + +### Interfaces Folder + +These files contain the public interface declarations for subsets of the Redis interface. + +* Async functions are not supported in traits, so we return an `AsyncResult` from each of these functions. This struct implements the `Future` trait so callers can use it like an async function. +* Functions should take generic arguments to be flexible to the caller. Use `Into`, `TryInto`, etc. There are quite a few helper structs in the [types](src/types) folder to make this easier. +* These functions must convert generic arguments to the actual underlying types used by the associated [impl](src/commands/impls) file/function. 
The `into!()` and `try_into!()` macros can convert multiple types automatically, and are written to break out early with `AsyncResult` errors as needed. +* These functions should return generic response types in most cases. This usually means declaring the response as `FromRedis + Unpin + Send`. +* These functions should use the `async_spawn` function from the top-level [interfaces](src/interfaces.rs) file to call an async block from a non-async function. +* Contributors should add some docs from the Redis website (try to limit to one sentence or so), and a link to the actual command documentation. + +### Impls Folder + +These files contain the actual implementation details for each command. They are not called directly by users, but rather by the associated file/function in the [interface](src/commands/interfaces) file. * All private command functions in this folder take their first argument as a `&Arc`. This struct contains all the necessary state for any command. -* Commands should take generic arguments to be flexible to the caller. Use `Into`, `Into`, etc. There are quite a few helper structs in the [types](src/types.rs) file to make this easier. -* Some helpful command function generation macros exist in the [command mod.rs](src/commands/mod.rs) file to remove boilerplate for simple commands. +* These functions do not need to be written in a generic way. They can assume any callers will have converted values to any intermediate structs/enums. In the past they were written generically, so you may see that in some older code paths, but I'm trying to standardize these to use non-generic arguments going forward. +* Some helpful command function generation macros exist in the [command mod.rs](src/commands/impls/mod.rs) file to remove boilerplate for simple commands. * All commands should use the `request_response` utility function from the [top level utils file](src/utils.rs). 
* Private command functions are responsible for preparing their arguments array and converting the response frame to the appropriate return value type. * It should not be necessary to add any tracing logic to individual command functions. @@ -260,13 +265,15 @@ Commands are organized in the [commands](src/commands) folder by their category. There are 2 functions in the [protocol utils](src/protocol/utils.rs) for converting response frames into `RedisValue` enums. -* `frame_to_results` - Converts an arbitrarily nested response frame into an arbitrarily nested `RedisValue`, including support for `QUEUED` responses during a transaction. -* `frame_to_single_result` - The same as `frame_to_results`, but with an added validation layer that only allows for non-nested `RedisValue` variants. This is useful to detect unexpected protocol errors if a command should only return a `BulkString` but receives an `Array` instead. +* `frame_to_results` - Converts an arbitrarily nested response frame into an arbitrarily nested `RedisValue`, including support for `QUEUED` responses during a transaction. +* `frame_to_single_result` - The same as `frame_to_results`, but with an added validation layer that only allows for non-nested `RedisValue` variants. This is useful to detect unexpected protocol errors if a command should only return a `BulkString` but receives an `Array` instead, for example. Both of these functions will automatically check for error frames and will generate the appropriate `RedisError`, if necessary. **Both of these functions will automatically convert single-element response arrays to the first element in the array.** This is done because RESP2 sends all responses as an array of bulk strings, even when the response only contains one element in the array. It's up to the developer to consider when an array is an appropriate return type for a command. 
+Additionally, if callers need to avoid the logic that automatically unwraps single-element arrays they can use the `frame_to_results_raw` function, which will not modify responses in any way. + There are also some utility functions for converting to other data types: * `frame_to_map` - Convert a frame representing an array of nested frames with an even number of elements to a `RedisMap`. @@ -275,29 +282,24 @@ There are also some utility functions for converting to other data types: ... and some others. -## Public Interface +Additionally, the `convert` function on any `RedisValue` can convert to any type that implements `FromRedis`. See the [response type conversion file](src/modules/response.rs) for more information. -Once the private interface has been implemented the same function needs to be exposed on the `RedisClient` struct. +## Public Interface -1. Copy or create the same function signature in the `RedisClient` [struct](src/client.rs), but change the first argument to `&self`. -2. The Redis interface is huge, so please keep this file organized. -3. Use the `disallow_during_transaction` utility in this function, if necessary. -4. Call the private function from this public function, using `&self.inner` as the first argument. -5. Copy or write documentation for the command in this file. Some docs are quite long so most command docs simply include the first few sentences or a high level overview of the command. -6. Include a link to the full docs for the command from the [Redis docs website](https://redis.io/commands). +Once the trait interface function and private impl function have been implemented the same function needs to be exposed on any relevant client structs. -Moving from the private to public interface is a bit tedious, so I'm looking for better ways to do this. +In most cases this will not require any changes, but when adding a new command category it can. 
If needed callers should go to the relevant [client](src/clients) file and `impl` the new trait. ## Example -This example shows how to add `MSET` to the commands. +This example shows how to add `MGET` to the commands. 1. Add the new variant to the `RedisCommandKind` enum, if needed. ```rust pub enum RedisCommandKind { // ... - Mset, + Mget, // ... } @@ -308,7 +310,7 @@ impl RedisCommandKind { pub fn to_str_debug(&self) -> &'static str { match *self { // .. - RedisCommandKind::Mset => "MSET", + RedisCommandKind::Mget => "MGET", // .. } } @@ -318,7 +320,7 @@ impl RedisCommandKind { pub fn cmd_str(&self) -> &'static str { match *self { // .. - RedisCommandKind::Mset => "MSET" + RedisCommandKind::Mget => "MGET" // .. } } @@ -327,99 +329,80 @@ impl RedisCommandKind { } ``` -2. Create the private function implementing the command in [src/commands/keys.rs](src/commands/keys.rs). +2. Create the private function implementing the command in [src/commands/impls/keys.rs](src/commands/impls/keys.rs). ```rust -pub async fn mset(inner: &Arc, values: V) -> Result -where - V: Into, -{ - let values = values.into(); - if values.len() == 0 { - return Err(RedisError::new( - RedisErrorKind::InvalidArgument, - "Values cannot be empty.", - )); - } +pub async fn mget(inner: &Arc, keys: MultipleKeys) -> Result { + utils::check_empty_keys(&keys)?; let frame = utils::request_response(inner, move || { - // this closure will appear in traces - let mut args = Vec::with_capacity(values.len() * 2); - - for (key, value) in values.inner().into_iter() { - args.push(key.into()); - args.push(value); - } - - Ok((RedisCommandKind::Mset, args)) + // time spent here will show up in traces + let args = keys.inner().into_iter().map(|k| k.into()).collect(); + Ok((RedisCommandKind::Mget, args)) }) .await?; - protocol_utils::frame_to_single_result(frame) + protocol_utils::frame_to_results(frame) } ``` -3. Create the public function in the [RedisClient](src/client.rs) struct. +3. 
Create the public function in the [src/commands/interfaces/keys.rs](src/commands/interfaces/keys.rs) file. ```rust // ... -impl RedisClient { +pub trait KeysInterface: ClientLike + Sized { // ... - /// Sets the given keys to their respective values. + /// Returns the values of all specified keys. For every key that does not hold a string value or does not exist, the special value nil is returned. /// - /// - pub async fn mset(&self, values: V) -> Result - where - V: Into, + /// + fn mget(&self, keys: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, { - commands::keys::mset(&self.inner, values).await + into!(keys); + async_spawn(self, |inner| async move { + commands::keys::mget(&inner, keys).await?.convert() + }) } // ... } ``` +Finally, if the actual client struct (such as the `RedisClient`) doesn't already have a line to implement the `KeysInterface` then contributors need to add that. + +```rust +impl KeysInterface for RedisClient {} +``` + # Adding Tests Integration tests are in the [tests/integration](tests/integration) folder organized by category. See the tests [README](tests/README.md) for more information. -Using `MSET` as an example: +Using `MGET` as an example: 1. Write tests in the [keys](tests/integration/keys/mod.rs) file. 
```rust -pub async fn should_mset_a_non_empty_map(client: RedisClient, config: RedisConfig) -> Result<(), RedisError> { - // macro to panic if a value isn't nil/None +pub async fn should_mget_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { check_null!(client, "a{1}"); check_null!(client, "b{1}"); check_null!(client, "c{1}"); - let mut map: HashMap = HashMap::new(); - // MSET args all have to map to the same cluster node - map.insert("a{1}".into(), 1.into()); - map.insert("b{1}".into(), 2.into()); - map.insert("c{1}".into(), 3.into()); - - let _ = client.mset(map).await?; - let a = client.get("a{1}").await?; - let b = client.get("b{1}").await?; - let c = client.get("c{1}").await?; - - assert_eq!(a.as_i64().unwrap(), 1); - assert_eq!(b.as_i64().unwrap(), 2); - assert_eq!(c.as_i64().unwrap(), 3); + let expected: Vec<(&str, RedisValue)> = vec![("a{1}", 1.into()), ("b{1}", 2.into()), ("c{1}", 3.into())]; + for (key, value) in expected.iter() { + let _: () = client.set(*key, value.clone(), None, None, false).await?; + } + let values: Vec = client.mget(vec!["a{1}", "b{1}", "c{1}"]).await?; + assert_eq!(values, vec![1, 2, 3]); Ok(()) } - -// should panic -pub async fn should_error_mset_empty_map(client: RedisClient, config: RedisConfig) -> Result<(), RedisError> { - client.mset(RedisMap::new()).await.map(|_| ()) -} ``` 2. Call the tests from the [centralized server tests](tests/integration/centralized.rs). @@ -428,8 +411,7 @@ pub async fn should_error_mset_empty_map(client: RedisClient, config: RedisConfi mod keys { // .. - centralized_test!(keys, should_mset_a_non_empty_map); - centralized_test_panic!(keys, should_error_mset_empty_map); + centralized_test!(keys, should_mget_values); } ``` @@ -440,12 +422,11 @@ mod keys { mod keys { // .. 
- cluster_test!(keys, should_mset_a_non_empty_map); - cluster_test_panic!(keys, should_error_mset_empty_map); + cluster_test!(keys, should_mget_values); } ``` -This will generate test wrappers to call your test function against both centralized and clustered redis servers with pipelined and non-pipelined clients. +This will generate test wrappers to call your test function against both centralized and clustered redis servers with pipelined and non-pipelined clients in RESP2 and RESP3 modes. # Misc @@ -486,4 +467,4 @@ Fortunately Tokio has a mechanism for tasks like this: [block_in_place](https:// To make these operations less impactful on an application this library provides a feature called `blocking-encoding`. This feature will use `block_in_place` for CPU-bound operations that operate on values over a certain size, **but should only be enabled if the caller uses a multi-thread runtime**. -See the [globals](./src/globals.rs) file for information on configuring the size threshold where CPU-bound tasks will use this interface. \ No newline at end of file +See the [globals](./src/globals.rs) file for information on configuring the size threshold where CPU-bound tasks will use this interface. diff --git a/Cargo.toml b/Cargo.toml index 7cfad2b2..8e1002ba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fred" -version = "4.3.2" +version = "5.0.0-beta.1" authors = ["Alec Embke "] edition = "2018" description = "An async Redis client for Rust built on Futures and Tokio." 
@@ -16,32 +16,37 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] +arcstr = "1.1" +arc-swap = "1.5" tokio = { version = "1", features = ["full"] } -bytes = "1.0" +tokio-util = { version = "0.6", features = ["codec"] } +cfg-if = "1.0.0" +bytes = "1.1" +bytes-utils = "0.1" futures = "0.3" parking_lot = "0.11" lazy_static = "1.4" -redis-protocol = "3.0" +redis-protocol = { version = "4.0", features = ["decode-mut"] } log = "0.4" pretty_env_logger = "0.4" float-cmp = "0.8" url = "2.2" -native-tls = { version = "0.2", optional = true } -tokio-native-tls = { version = "0.3", optional = true } -rand = "0.8" -tokio-util = { version = "0.6", features = ["codec"] } tokio-stream = "0.1" -indexmap = { version = "1.7", optional = true } sha-1 = "0.9" -tracing = { version = "0.1", optional = true } -tracing-futures = { version = "0.2", optional = true } +rand = "0.8" async-trait = "0.1" semver = "1.0" +native-tls = { version = "0.2", optional = true } +tokio-native-tls = { version = "0.3", optional = true } +tracing = { version = "0.1", optional = true } +tracing-futures = { version = "0.2", optional = true } nom = { version = "6.1", optional = true } +serde_json = { version = "1", optional = true } [dev-dependencies] prometheus = "0.12" subprocess = "0.2.7" +maplit = "1.0" [lib] doc = true @@ -60,15 +65,29 @@ required-features = ["partial-tracing"] name = "monitor" required-features = ["monitor"] +[[example]] +name = "prometheus" +required-features = ["metrics"] + +[[example]] +name = "pubsub" +required-features = ["subscriber-client"] + +[[example]] +name = "serde" +required-features = ["serde-json"] + [features] -default = ["ignore-auth-error", "pool-prefer-active", "enable-tls", "metrics"] +default = ["ignore-auth-error", "pool-prefer-active", "enable-tls"] +fallback = [] +serde-json = ["serde_json"] +subscriber-client = [] metrics = [] ignore-auth-error = [] enable-tls = ["native-tls", "tokio-native-tls"] vendored-tls = ["enable-tls", 
"native-tls/vendored"] mocks = [] reconnect-on-auth-error = [] -index-map = ["indexmap"] pool-prefer-active = [] full-tracing = ["partial-tracing", "tracing", "tracing-futures"] partial-tracing = ["tracing", "tracing-futures"] diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..747457a1 --- /dev/null +++ b/Makefile @@ -0,0 +1,26 @@ +SHELL := /bin/bash + +.PHONY: clean install test-all-features test-default-features test-no-features test-sentinel test-all test + +clean: + rm -rf tests/tmp/redis* && cargo clean + +install: + source tests/environ && tests/scripts/full_install.sh + +test-all-features: + source tests/environ && tests/runners/all-features.sh + +test-default-features: + source tests/environ && tests/runners/default-features.sh + +test-no-features: + source tests/environ && tests/runners/no-features.sh + +test-sentinel: + source tests/environ && tests/runners/sentinel-features.sh + +test-all: + source tests/environ && tests/runners/everything.sh + +test: test-default-features \ No newline at end of file diff --git a/README.md b/README.md index b221c5f2..146d7b40 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ cargo add fred ## Features -* Flexible and generic client interfaces. +* Supports RESP2 and RESP3 protocol modes. * Supports clustered, centralized, and sentinel Redis deployments. * Optional built-in reconnection logic with multiple backoff policies. * Publish-Subscribe and keyspace events interfaces. @@ -63,14 +63,16 @@ cargo add fred * Supports streaming results from the `MONITOR` command. * Supports custom commands provided by third party modules. * Supports TLS connections. -* Handles cluster rebalancing operations without downtime or errors. * Supports streaming interfaces for scanning functions. * Options to automatically [pipeline](https://redis.io/topics/pipelining) requests when possible. * Automatically retry requests under bad network conditions. 
-* Support for configuring global settings that can affect performance under different network conditions. Callers can configure backpressure settings, when and how the underlying socket is flushed, and how many times requests are attempted. * Built-in tracking for network latency and payload size metrics. -* A client pooling interface to round-robin requests among a pool of clients. -* Built in support for [tracing](https://crates.io/crates/tracing). +* An optional client pooling interface to round-robin requests among a pool of clients. +* An optional sentinel client for interacting directly with sentinel nodes to manually fail over servers, etc. +* An optional pubsub subscriber client that will automatically manage channel subscriptions. +* Optional built in support for JSON values. + +**Note: Fred requires Tokio 1.x or above. Actix users must be using 4.x or above as a result.** ## Tracing @@ -86,23 +88,24 @@ When a client is initialized it will generate a unique client name with a prefix ## Compile Time Features -| Name | Default | Description | -|---------------------------- |---------|----------------------------------------------------------------------------------------------------------------------------------------------| -| enable-tls | x | Enable TLS support. This requires OpenSSL (or equivalent) dependencies. | -| vendored-tls | | Enable TLS support, using vendored OpenSSL (or equivalent) dependencies, if possible. | -| ignore-auth-error | x | Ignore auth errors that occur when a password is supplied but not required. | -| metrics | x | Enable the metrics interface to track overall latency, network latency, and request/response sizes. | -| reconnect-on-auth-error | | A NOAUTH error is treated the same as a general connection failure and the client will reconnect based on the reconnection policy. 
| -| index-map | | Use [IndexMap](https://docs.rs/indexmap/*/indexmap/) instead of [HashMap](https://doc.rust-lang.org/std/collections/struct.HashMap.html) as the backing store for Redis Map types. This is useful for testing and may also be useful for callers. | -| pool-prefer-active | x | Prefer connected clients over clients in a disconnected state when using the `RedisPool` interface. | -| full-tracing | | Enable full [tracing](./src/trace/README.md) support. This can emit a lot of data so a partial tracing feature is also provided. | -| partial-tracing | | Enable partial [tracing](./src/trace/README.md) support, only emitting traces for top level commands and network latency. Note: this has a non-trivial impact on [performance](./bin/pipeline_test/README.md#Examples). | -| blocking-encoding | | Use a blocking task for encoding or decoding frames over a [certain size](./src/modules/globals.rs). This can be useful for clients that send or receive large payloads, but will only work when used with a multi-thread Tokio runtime. | -| network-logs | | Enable TRACE level logging statements that will print out all data sent to or received from the server. | -| custom-reconnect-errors | | Enable an interface for callers to customize the types of errors that should automatically trigger reconnection logic. | -| monitor | | Enable an interface for running the `MONITOR` command. | -| sentinel-client | | Enable an interface for communicating directly with Sentinel nodes. This is not necessary to use normal Redis clients behind a sentinel layer. | -| sentinel-auth | | Enable an interface for using different authentication credentials to sentinel nodes. 
| +| Name | Default | Description | +|---------------------------- |---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| enable-tls | x | Enable TLS support. This requires OpenSSL (or equivalent) dependencies. | +| vendored-tls | | Enable TLS support, using vendored OpenSSL (or equivalent) dependencies, if possible. | +| ignore-auth-error | x | Ignore auth errors that occur when a password is supplied but not required. | +| metrics | | Enable the metrics interface to track overall latency, network latency, and request/response sizes. | +| reconnect-on-auth-error | | A NOAUTH error is treated the same as a general connection failure and the client will reconnect based on the reconnection policy. This is [recommended](https://github.com/StackExchange/StackExchange.Redis/issues/1273#issuecomment-651823824) if callers are using ElastiCache. | +| pool-prefer-active | x | Prefer connected clients over clients in a disconnected state when using the `RedisPool` interface. | +| full-tracing | | Enable full [tracing](./src/trace/README.md) support. This can emit a lot of data so a partial tracing feature is also provided. | +| partial-tracing | | Enable partial [tracing](./src/trace/README.md) support, only emitting traces for top level commands and network latency. Note: this has a non-trivial impact on [performance](./bin/pipeline_test/README.md#Examples). | +| blocking-encoding | | Use a blocking task for encoding or decoding frames over a [certain size](./src/modules/globals.rs). This can be useful for clients that send or receive large payloads, but will only work when used with a multi-thread Tokio runtime. | +| network-logs | | Enable TRACE level logging statements that will print out all data sent to or received from the server. 
These are the only logging statements that can ever contain potentially sensitive user data. | +| custom-reconnect-errors | | Enable an interface for callers to customize the types of errors that should automatically trigger reconnection logic. | +| monitor | | Enable an interface for running the `MONITOR` command. | +| sentinel-client | | Enable an interface for communicating directly with Sentinel nodes. This is not necessary to use normal Redis clients behind a sentinel layer. | +| sentinel-auth | | Enable an interface for using different authentication credentials to sentinel nodes. | +| subscriber-client | | Enable a higher level subscriber client that manages channel subscription state for callers. | +| serde-json | | Enable an interface to automatically convert Redis types to JSON. | ## Environment Variables @@ -111,14 +114,10 @@ When a client is initialized it will generate a unique client name with a prefix | FRED_DISABLE_CERT_VERIFICATION | `false` | Disable certificate verification when using TLS features. | | FRED_DISABLE_HOST_VERIFICATION | `false` | Disable host verification when using TLS features. | -These are environment variables because they're dangerous in production and callers should be forced to surface them in a loud and obvious way. - ## Pipelining The caller can toggle [pipelining](https://redis.io/topics/pipelining) via flags on the `RedisConfig` provided to a client to enable automatic pipelining for commands whenever possible. These settings can drastically affect performance on both the server and client, but further performance tuning may be necessary to avoid issues such as using too much memory on the client or server while buffering commands. -See the global performance tuning functions for more information on how to tune backpressure or other relevant settings related to pipelining. - This module also contains a [separate test application](bin/pipeline_test) that can be used to demonstrate the effects of pipelining. 
This test application also contains some helpful information on how to use the tracing features. ## ACL & Authentication @@ -127,9 +126,9 @@ Prior to the introduction of ACL commands in Redis version 6 clients would authe If callers are using ACLs and Redis version >=6.x they can configure the client to automatically authenticate by using the `username` and `password` fields on the provided `RedisConfig`. -**It is required that the authentication information provided to the `RedisConfig` allows the client to run `CLIENT SETNAME` and `CLUSTER NODES`.** Callers can still change users via the `auth` command later, but it recommended to instead use the username and password provided to the `RedisConfig` so that the client can automatically authenticate after reconnecting. +**It is required that the authentication information provided to the `RedisConfig` allows the client to run `CLIENT SETNAME` and `CLUSTER NODES`.** Callers can still change users via the `AUTH` command later, but it recommended to instead use the username and password provided to the `RedisConfig` so that the client can automatically authenticate after reconnecting. -If this is not possible callers need to ensure that the default user can run the two commands above. Additionally, it is recommended to move any calls to the `auth` command inside the `on_reconnect` block. +If this is not possible callers need to ensure that the default user can run the two commands above. Additionally, it is recommended to move any calls to the `AUTH` or `HELLO` command inside the `on_reconnect` block. 
## Redis Sentinel diff --git a/bin/pipeline_test/src/main.rs b/bin/pipeline_test/src/main.rs index eb6c17e2..a9a258d9 100644 --- a/bin/pipeline_test/src/main.rs +++ b/bin/pipeline_test/src/main.rs @@ -14,10 +14,13 @@ extern crate log; extern crate pretty_env_logger; use clap::{App, ArgMatches}; -use fred::pool::StaticRedisPool; +use fred::globals; +use fred::pool::RedisPool; use fred::prelude::*; +use fred::types::{BackpressureConfig, PerformanceConfig}; use opentelemetry::global; use opentelemetry::sdk::trace::{self, IdGenerator, Sampler}; +use std::default::Default; use std::sync::atomic::AtomicUsize; use std::sync::Arc; use std::thread::{self, JoinHandle as ThreadJoinHandle}; @@ -34,7 +37,6 @@ static DEFAULT_PORT: u16 = 6379; static TEST_KEY: &'static str = "foo"; mod utils; -use fred::globals; #[derive(Debug)] struct Argv { @@ -154,7 +156,6 @@ fn spawn_client_task( fn main() { pretty_env_logger::init(); - globals::set_backpressure_count(1000000); let argv = parse_argv(); info!("Running with configuration: {:?}", argv); @@ -165,9 +166,17 @@ fn main() { let counter = Arc::new(AtomicUsize::new(0)); let config = RedisConfig { server: ServerConfig::new_centralized(&argv.host, argv.port), + performance: PerformanceConfig { + pipeline: argv.pipeline, + backpressure: BackpressureConfig { + max_in_flight_commands: 100_000_000, + ..Default::default() + }, + ..Default::default() + }, ..Default::default() }; - let pool = StaticRedisPool::new(config, argv.pool)?; + let pool = RedisPool::new(config, argv.pool)?; info!("Connecting to {}:{}...", argv.host, argv.port); let _ = pool.connect(None); diff --git a/examples/README.md b/examples/README.md index 0b058651..4cdd7df7 100644 --- a/examples/README.md +++ b/examples/README.md @@ -10,10 +10,11 @@ Examples * [Lua](./lua.rs) - Use the Lua scripting interface on a client. * [Scan](./scan.rs) - Use the SCAN interface to scan and read keys. * [Prometheus](./prometheus.rs) - Use the metrics interface with prometheus. 
-* [Static Pool](./static_pool.rs) - Use a redis pool that cannot be modified after being created. -* [Dynamic Pool](./dynamic_pool.rs) - Use a redis pool that can be scaled up and down at runtime. +* [Pool](./pool.rs) - Use a redis connection pool. * [Resilience](./resilience.rs) - Configure the client to work under bad network conditions or against unreliable servers. * [Monitor](./monitor.rs) - Process a `MONITOR` stream. * [Sentinel](./sentinel.rs) - Connect using a sentinel deployment. +* [Serde](./serde.rs) - Use the `serde-json` feature to convert values and responses to JSON automatically. +* [Custom](./custom.rs) - Send custom commands or operate on RESP protocol frames directly. Or check out the [tests](../tests/integration) for more examples. \ No newline at end of file diff --git a/examples/basic.rs b/examples/basic.rs index baab16ea..a4ed2759 100644 --- a/examples/basic.rs +++ b/examples/basic.rs @@ -1,20 +1,19 @@ use fred::prelude::*; +use fred::types::{BackpressureConfig, PerformanceConfig, RespVersion}; use futures::stream::StreamExt; -use std::time::Duration; -use tokio::time::sleep; +use std::default::Default; const DATABASE: u8 = 2; #[tokio::main] async fn main() -> Result<(), RedisError> { - // example showing full config options + // example showing a full kitchen sink configuration + // use `..Default::default` to fill in defaults wherever needed let config = RedisConfig { // whether to skip reconnect logic when first connecting fail_fast: true, // server configuration server: ServerConfig::new_centralized("127.0.0.1", 6379), - // whether to automatically pipeline commands - pipeline: true, // how to handle commands sent while a connection is blocked blocking: Blocking::Block, // an optional username, if using ACL rules @@ -25,6 +24,34 @@ async fn main() -> Result<(), RedisError> { tls: None, // whether to enable tracing tracing: false, + // the protocol version to use + version: RespVersion::RESP2, + // the database to automatically select 
after connecting or reconnecting + database: Some(DATABASE), + // performance tuning options + performance: PerformanceConfig { + // whether or not to automatically pipeline commands + pipeline: true, + // the max number of frames to feed into a socket before flushing it + max_feed_count: 1000, + // a default timeout to apply to all commands (0 means no timeout) + default_command_timeout_ms: 0, + // the amount of time to wait before syncing cluster state after a MOVED or ASK error + cluster_cache_update_delay_ms: 10, + // the maximum number of times to retry commands when connections close unexpectedly + max_command_attempts: 3, + // backpressure config options + backpressure: BackpressureConfig { + // whether to disable automatic backpressure features + disable_auto_backpressure: false, + // whether to disable scaling backpressure `sleep` durations based on the number of in-flight commands + disable_backpressure_scaling: false, + // the minimum amount of time to `sleep` when applying automatic backpressure + min_sleep_duration_ms: 100, + // the max number of in-flight commands before applying backpressure or returning backpressure errors + max_in_flight_commands: 5000, + }, + }, }; // configure exponential backoff when reconnecting, starting at 100 ms, and doubling each time up to 30 sec. 
let policy = ReconnectPolicy::new_exponential(0, 100, 30_000, 2); @@ -37,8 +64,7 @@ async fn main() -> Result<(), RedisError> { // run a function whenever the client reconnects tokio::spawn(client.on_reconnect().for_each(move |client| async move { println!("Client {} reconnected.", client.id()); - // select the database each time we connect or reconnect - let _ = client.select(DATABASE).await; + Ok(()) })); let _ = client.connect(Some(policy)); @@ -48,13 +74,19 @@ async fn main() -> Result<(), RedisError> { let foo: Option = client.get("foo").await?; println!("Foo: {:?}", foo); - let _ = client + let _: () = client .set("foo", "bar", Some(Expiration::EX(1)), Some(SetOptions::NX), false) .await?; // or use turbofish. the first type is always the response type. println!("Foo: {:?}", client.get::("foo").await?); + // update performance config options as needed + client.update_perf_config(PerformanceConfig { + max_command_attempts: 100, + ..Default::default() + }); + let _ = client.quit().await?; Ok(()) } diff --git a/examples/custom.rs b/examples/custom.rs new file mode 100644 index 00000000..c25dc77e --- /dev/null +++ b/examples/custom.rs @@ -0,0 +1,55 @@ +use fred::prelude::*; +use fred::types::{CustomCommand, RedisKey}; +use redis_protocol::resp3::types::Frame; +use std::convert::TryInto; + +fn get_hash_slot(client: &RedisClient, key: &'static str) -> (RedisKey, Option) { + let key = RedisKey::from_static_str(key); + let hash_slot = if client.is_clustered() { + // or use redis_protocol::redis_keyslot(key.as_bytes()) + Some(key.cluster_hash()) + } else { + None + }; + + (key, hash_slot) +} + +#[tokio::main] +async fn main() -> Result<(), RedisError> { + pretty_env_logger::init(); + + let client = RedisClient::new(RedisConfig::default()); + let _ = client.connect(Some(ReconnectPolicy::default())); + let _ = client.wait_for_connect().await?; + + let (key, hash_slot) = get_hash_slot(&client, "ts:carbon_monoxide"); + let args: Vec = vec![key.into(), 1112596200.into(), 
1112603400.into()]; + let cmd = CustomCommand::new_static("TS.RANGE", hash_slot, false); + /* + >> TS.RANGE ts:carbon_monoxide 1112596200 1112603400 + 1) 1) (integer) 1112596200 + 2) "2.4" + 2) 1) (integer) 1112599800 + 2) "2.1" + 3) 1) (integer) 1112603400 + 2) "2.2" + */ + let values: Vec<(i64, f64)> = client.custom(cmd, args).await?; + println!("TS.RANGE Values: {:?}", values); + + let _: () = client.lpush("foo", vec![1, 2, 3]).await?; + let (key, hash_slot) = get_hash_slot(&client, "foo"); + let cmd = CustomCommand::new_static("LRANGE", hash_slot, false); + // some types require TryInto + let args: Vec = vec![key.into(), 0.into(), 3_u64.try_into()?]; + // returns a frame (https://docs.rs/redis-protocol/latest/redis_protocol/resp3/types/enum.Frame.html) + let frame = client.custom_raw(cmd, args).await?; + // or convert back to client types + let value: RedisValue = frame.try_into()?; + // and/or use the type conversion shorthand + let value: Vec = value.convert()?; + println!("LRANGE Values: {:?}", value); + + Ok(()) +} diff --git a/examples/dynamic_pool.rs b/examples/dynamic_pool.rs deleted file mode 100644 index 6af388e6..00000000 --- a/examples/dynamic_pool.rs +++ /dev/null @@ -1,36 +0,0 @@ -use fred::pool::DynamicRedisPool; -use fred::prelude::*; -use futures::stream::StreamExt; - -#[tokio::main] -async fn main() -> Result<(), RedisError> { - let config = RedisConfig::default(); - // the max size isn't a hard limit - it just determines the size of the client array when the pool is initialized - let pool = DynamicRedisPool::new(config, None, 5, 10); - - let _ = pool.connect().await; - let _ = pool.wait_for_connect().await?; - - // modify the size of the pool at runtime - let (new_client, _) = pool.scale_up().await; - if let Some(old_client) = pool.scale_down(true).await { - assert_eq!(new_client.id(), old_client.id()); - } - - for client in pool.clients() { - println!("Client ID {} in pool.", client.id()); - } - - // due to the locking required by the 
resizing operations the Deref trait cannot be used with this pool implementation. - // if modifications to the pool are not required at runtime the static pool is usually easier to use - let _ = pool.next().get("foo").await?; - let _ = pool.next().set("foo", "bar", None, None, false).await?; - let _ = pool.next().get("foo").await?; - - // if the pool can be empty a function exists that will lazily create a new client, if needed. - // if the pool is not empty this just calls `next` without creating a new client. - let _ = pool.next_connect(true).await.get("foo").await?; - - let _ = pool.quit_pool().await; - Ok(()) -} diff --git a/examples/globals.rs b/examples/globals.rs index e03b71aa..7d56d01d 100644 --- a/examples/globals.rs +++ b/examples/globals.rs @@ -1,9 +1,12 @@ use fred::globals; use fred::prelude::*; -use futures::stream::StreamExt; + +#[cfg(feature = "custom-reconnect-errors")] +use globals::ReconnectError; #[tokio::main] async fn main() -> Result<(), RedisError> { + // note: in fred v5 the majority of the performance options were moved from the globals to the `RedisConfig` let config = RedisConfig::default(); let client = RedisClient::new(config); @@ -12,15 +15,15 @@ async fn main() -> Result<(), RedisError> { println!("Client failed to connect with error: {:?}", error); } - globals::set_feed_count(500); - globals::set_cluster_error_cache_delay_ms(100); - globals::set_min_backpressure_time_ms(20); - globals::set_default_command_timeout(30_000); - globals::set_max_command_attempts(5); - globals::set_backpressure_count(100); - + globals::set_sentinel_connection_timeout_ms(10_000); #[cfg(feature = "blocking-encoding")] globals::set_blocking_encode_threshold(10_000_000); + #[cfg(feature = "custom-reconnect-errors")] + globals::set_custom_reconnect_errors(vec![ + ReconnectError::ClusterDown, + ReconnectError::MasterDown, + ReconnectError::ReadOnly, + ]); // do stuff... 
diff --git a/examples/lua.rs b/examples/lua.rs index 98210c02..23cb893c 100644 --- a/examples/lua.rs +++ b/examples/lua.rs @@ -1,5 +1,5 @@ -use fred::client::util as fred_utils; use fred::prelude::*; +use fred::util as fred_utils; static SCRIPTS: &'static [&'static str] = &[ "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", @@ -13,7 +13,7 @@ async fn main() -> Result<(), RedisError> { let config = RedisConfig::default(); let client = RedisClient::new(config); - let jh = client.connect(None); + let _jh = client.connect(None); let _ = client.wait_for_connect().await?; for script in SCRIPTS.iter() { diff --git a/examples/static_pool.rs b/examples/pool.rs similarity index 51% rename from examples/static_pool.rs rename to examples/pool.rs index e727d3ad..3050e35e 100644 --- a/examples/static_pool.rs +++ b/examples/pool.rs @@ -1,23 +1,18 @@ -use fred::pool::StaticRedisPool; +use fred::pool::RedisPool; use fred::prelude::*; #[tokio::main] async fn main() -> Result<(), RedisError> { let config = RedisConfig::default(); - let pool = StaticRedisPool::new(config, 5)?; - - let jhs = pool.connect(None); + let pool = RedisPool::new(config, 5)?; + let _ = pool.connect(None); let _ = pool.wait_for_connect().await?; - // use the pool like any other RedisClient with the Deref trait + // use the pool like any other RedisClient let _ = pool.get("foo").await?; let _ = pool.set("foo", "bar", None, None, false).await?; let _ = pool.get("foo").await?; let _ = pool.quit_pool().await; - // from here the pool can be restarted by calling `connect` again, if needed - for jh in jhs.into_iter() { - let _ = jh.await; - } Ok(()) } diff --git a/examples/prometheus.rs b/examples/prometheus.rs index 194df13d..d03a0306 100644 --- a/examples/prometheus.rs +++ b/examples/prometheus.rs @@ -1,5 +1,4 @@ use fred::prelude::*; -use futures::stream::StreamExt; use prometheus::{register_int_gauge_vec, IntGaugeVec}; fn sample_metrics( diff --git a/examples/pubsub.rs b/examples/pubsub.rs index a1ae3748..290d0566 
100644 --- a/examples/pubsub.rs +++ b/examples/pubsub.rs @@ -1,3 +1,5 @@ +#[allow(unused_imports)] +use fred::clients::SubscriberClient; use fred::prelude::*; use futures::stream::StreamExt; use std::time::Duration; @@ -50,3 +52,36 @@ async fn main() -> Result<(), RedisError> { let _ = subscribe_task.abort(); Ok(()) } + +#[allow(dead_code)] +// requires the `subscriber-client` feature +async fn subscriber_example() -> Result<(), RedisError> { + let subscriber = SubscriberClient::new(RedisConfig::default()); + let _ = subscriber.connect(Some(ReconnectPolicy::default())); + let _ = subscriber.wait_for_connect().await?; + + let jh = tokio::spawn(subscriber.on_message().for_each(|(channel, message)| { + println!("Recv {:?} on channel {}", message, channel); + Ok(()) + })); + // spawn a task to manage subscription state automatically whenever the client reconnects + let _ = subscriber.manage_subscriptions(); + + let _ = subscriber.subscribe("foo").await?; + let _ = subscriber.psubscribe(vec!["bar*", "baz*"]).await?; + // if the connection closes after this point for any reason the client will automatically re-subscribe to "foo", "bar*", and "baz*" after reconnecting + + println!("Subscriber channels: {:?}", subscriber.tracked_channels()); // "foo" + println!("Subscriber patterns: {:?}", subscriber.tracked_patterns()); // "bar*", "baz*" + + let _ = subscriber.unsubscribe("foo").await?; + // now it will only automatically re-subscribe to "bar*" and "baz*" after reconnecting + + // force a re-subscription call to all channels or patterns + let _ = subscriber.resubscribe_all().await?; + // unsubscribe from all channels and patterns + let _ = subscriber.unsubscribe_all().await?; + // the subscriber client also supports all the basic redis commands + let _ = subscriber.quit().await; + let _ = jh.await; +} diff --git a/examples/scan.rs b/examples/scan.rs index cf531b95..86ab6ea2 100644 --- a/examples/scan.rs +++ b/examples/scan.rs @@ -1,6 +1,5 @@ use fred::prelude::*; -use 
futures::stream::{StreamExt, TryStreamExt}; -use std::collections::VecDeque; +use futures::stream::StreamExt; static COUNT: u32 = 50; @@ -53,6 +52,7 @@ async fn main() -> Result<(), RedisError> { let _ = delete_fake_data(&client).await?; let _ = client.quit().await?; + // optionally wait for the task driving the connection to finish let _ = jh.await; Ok(()) } diff --git a/examples/serde.rs b/examples/serde.rs new file mode 100644 index 00000000..28acd2ae --- /dev/null +++ b/examples/serde.rs @@ -0,0 +1,33 @@ +use fred::prelude::*; +use serde_json::{json, Value}; + +#[tokio::main] +async fn main() -> Result<(), RedisError> { + let config = RedisConfig::default(); + let client = RedisClient::new(config); + let _ = client.connect(None); + let _ = client.wait_for_connect().await?; + + let value = json!({ + "foo": "a", + "bar": "b" + }); + // json `Value` objects can also be used interchangeably with `RedisMap` type arguments. + // however, in this case we want to set "wibble" as a JSON string. + let _: () = client.set("wibble", value.to_string(), None, None, false).await?; + // but we could also pass `value` to `hset` directly and act on it like a map + let _: () = client.hset("wobble", value.clone()).await?; + + // converting back to a json `Value` will also try to parse nested json strings, if possible. + // the type conversion logic will not even attempt the json parsing if the value doesn't look like json. + // if a value looks like json, but cannot be parsed as json, then it will be returned as a string. 
+ let get_result: Value = client.get("wibble").await?; + println!("GET Result: {}", get_result); + let hget_result: Value = client.hgetall("wobble").await?; + println!("HGETALL Result: {}", hget_result); + + assert_eq!(value, get_result); + assert_eq!(value, hget_result); + let _ = client.quit().await; + Ok(()) +} diff --git a/examples/tls.rs b/examples/tls.rs index 79ba8a31..4a239950 100644 --- a/examples/tls.rs +++ b/examples/tls.rs @@ -1,6 +1,6 @@ use fred::prelude::*; use fred::types::TlsConfig; -use futures::stream::StreamExt; + #[tokio::main] async fn main() -> Result<(), RedisError> { diff --git a/src/client.rs b/src/client.rs deleted file mode 100644 index b9911a4e..00000000 --- a/src/client.rs +++ /dev/null @@ -1,3267 +0,0 @@ -use crate::commands; -use crate::error::{RedisError, RedisErrorKind}; -use crate::modules::inner::{MultiPolicy, RedisClientInner}; -use crate::modules::response::RedisResponse; -use crate::multiplexer::commands as multiplexer_commands; -use crate::multiplexer::utils as multiplexer_utils; -use crate::types::*; -use crate::utils; -use futures::Stream; -use std::collections::HashMap; -use std::convert::TryInto; -use std::fmt; -use std::ops::Deref; -use std::sync::Arc; -use std::time::Duration; -use tokio::sync::mpsc::unbounded_channel; -use tokio::time::interval as tokio_interval; -use tokio_stream::wrappers::UnboundedReceiverStream; - -#[cfg(feature = "metrics")] -use crate::modules::metrics::Stats; - -/// Utility functions used by the client that may also be useful to callers. -pub mod util { - pub use crate::utils::f64_to_redis_string; - pub use crate::utils::redis_string_to_f64; - pub use redis_protocol::redis_keyslot; - - /// Calculate the SHA1 hash output as a hex string. This is provided for clients that use the Lua interface to manage their own script caches. 
- pub fn sha1_hash(input: &str) -> String { - use sha1::Digest; - - let mut hasher = sha1::Sha1::new(); - hasher.update(input.as_bytes()); - format!("{:x}", hasher.finalize()) - } -} - -/// A wrapping struct for commands in a MULTI/EXEC transaction block. -pub struct TransactionClient { - client: RedisClient, - finished: bool, -} - -impl fmt::Display for TransactionClient { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "[TransactionClient {}: {}]", - self.client.inner.id, - self.client.state() - ) - } -} - -impl Drop for TransactionClient { - fn drop(&mut self) { - if !self.finished { - warn!( - "{}: Dropping transaction client without finishing transaction!", - self.inner.client_name() - ); - } - } -} - -impl TransactionClient { - /// Executes all previously queued commands in a transaction and restores the connection state to normal. - /// - /// - /// - /// Note: Automatic request retry policies in the event of a connection closing can present problems for transactions. - /// If the underlying connection closes while a transaction is in process the client will abort the transaction by - /// returning a `Canceled` error to the caller of any pending intermediate command, as well as this one. It's up to - /// the caller to retry transactions as needed. - pub async fn exec(mut self) -> Result - where - R: RedisResponse, - { - self.finished = true; - commands::server::exec(&self.client.inner).await?.convert() - } - - /// Flushes all previously queued commands in a transaction and restores the connection state to normal. - /// - /// - pub async fn discard(mut self) -> Result<(), RedisError> { - self.finished = true; - commands::server::discard(&self.client.inner).await - } - - /// Read the hash slot against which this transaction will run, if known. 
- pub fn hash_slot(&self) -> Option { - utils::read_locked(&self.inner.multi_block).and_then(|b| b.hash_slot) - } -} - -impl Deref for TransactionClient { - type Target = RedisClient; - - fn deref(&self) -> &Self::Target { - &self.client - } -} - -#[doc(hidden)] -impl<'a> From<&'a Arc> for TransactionClient { - fn from(inner: &'a Arc) -> Self { - TransactionClient { - client: RedisClient::from(inner), - finished: false, - } - } -} - -/// A Redis client struct. -#[derive(Clone)] -pub struct RedisClient { - pub(crate) inner: Arc, -} - -impl fmt::Display for RedisClient { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "[RedisClient {}: {}]", self.inner.id, self.state()) - } -} - -#[doc(hidden)] -impl<'a> From<&'a Arc> for RedisClient { - fn from(inner: &'a Arc) -> RedisClient { - RedisClient { inner: inner.clone() } - } -} - -impl RedisClient { - /// Create a new client instance without connecting to the server. - pub fn new(config: RedisConfig) -> RedisClient { - RedisClient { - inner: RedisClientInner::new(config), - } - } - - /// The unique ID identifying this client and underlying connections. All connections will use the ID of the client that created them. - /// - /// The client will use [CLIENT SETNAME](https://redis.io/commands/client-setname) upon initializing a connection so client logs can be associated with server logs. - pub fn id(&self) -> &Arc { - &self.inner.id - } - - /// Read the config used to initialize the client. - pub fn client_config(&self) -> RedisConfig { - utils::read_locked(&self.inner.config) - } - - /// Read the reconnect policy used to initialize the client. - pub fn client_reconnect_policy(&self) -> Option { - self.inner.policy.read().clone() - } - - /// Whether or not the client has a reconnection policy. - pub fn has_reconnect_policy(&self) -> bool { - self.inner.policy.read().is_some() - } - - /// Connect to the Redis server with an optional reconnection policy. 
- /// - /// This function returns a `JoinHandle` to a task that drives the connection. It will not resolve - /// until the connection closes, and if a reconnection policy with unlimited attempts - /// is provided then the `JoinHandle` will run forever, or until `QUIT` is called. - /// - /// **Note:** See the [RedisConfig](crate::types::RedisConfig) documentation for more information on how the `policy` is applied to new connections. - pub fn connect(&self, policy: Option) -> ConnectHandle { - let inner = self.inner.clone(); - - tokio::spawn(async move { - let result = multiplexer_commands::init(&inner, policy).await; - if let Err(ref e) = result { - multiplexer_utils::emit_connect_error(&inner, e); - } - utils::set_client_state(&inner.state, ClientState::Disconnected); - result - }) - } - - /// Wait for the client to connect to the server, or return an error if the initial connection cannot be established. - /// If the client is already connected this future will resolve immediately. - /// - /// This can be used with `on_reconnect` to separate initialization logic that needs to occur only on the first connection attempt vs subsequent attempts. - pub async fn wait_for_connect(&self) -> Result<(), RedisError> { - utils::wait_for_connect(&self.inner).await - } - - /// Create a new `RedisClient` from the config provided to this client. - /// - /// The returned client will not be connected to the server, and it will use new connections after connecting. - pub fn clone_new(&self) -> Self { - RedisClient::new(utils::read_locked(&self.inner.config)) - } - - /// Whether or not the client will pipeline commands. - pub fn is_pipelined(&self) -> bool { - self.inner.is_pipelined() - } - - /// Return a future that will ping the server on an interval. - /// - /// If the underlying connection closes or `PING` returns an error this will break the interval and this function will need to be called again. 
- pub async fn enable_heartbeat(&self, interval: Duration) -> Result<(), RedisError> { - let mut interval = tokio_interval(interval); - loop { - interval.tick().await; - - if utils::is_locked_some(&self.inner.multi_block) { - let inner = &self.inner; - _debug!(inner, "Skip heartbeat while inside transaction."); - continue; - } - - if self.state() != ClientState::Connected { - break; - } - let _ = self.ping().await?; - } - - Ok(()) - } - - /// Read the number of request redeliveries. - /// - /// This is the number of times a request had to be sent again due to a connection closing while waiting on a response. - pub fn read_redelivery_count(&self) -> usize { - utils::read_atomic(&self.inner.redeliver_count) - } - - /// Read and reset the number of request redeliveries. - pub fn take_redelivery_count(&self) -> usize { - utils::set_atomic(&self.inner.redeliver_count, 0) - } - - /// Read the state of the underlying connection(s). - /// - /// If running against a cluster the underlying state will reflect the state of the least healthy connection, if any. - pub fn state(&self) -> ClientState { - self.inner.state.read().clone() - } - - /// Whether or not the client has an active connection to the server(s). - pub fn is_connected(&self) -> bool { - *self.inner.state.read() == ClientState::Connected - } - - /// Read the cached state of the cluster used for routing commands to the correct cluster nodes. - pub fn cached_cluster_state(&self) -> Option { - self.inner.cluster_state.read().clone() - } - - /// Read latency metrics across all commands. - /// - /// This metric reflects the total latency experienced by callers, including time spent waiting in memory to be written and network latency. - /// Features such as automatic reconnect, `reconnect-on-auth-error`, and frame serialization time can all affect these values. 
- #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn read_latency_metrics(&self) -> Stats { - self.inner.latency_stats.read().read_metrics() - } - - /// Read and consume latency metrics, resetting their values afterwards. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn take_latency_metrics(&self) -> Stats { - self.inner.latency_stats.write().take_metrics() - } - - /// Read network latency metrics across all commands. - /// - /// This metric only reflects time spent waiting on a response. It will factor in reconnect time if a response doesn't arrive due to a connection - /// closing, but it does not factor in the time a command spends waiting to be written, serialization time, backpressure, etc. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn read_network_latency_metrics(&self) -> Stats { - self.inner.network_latency_stats.read().read_metrics() - } - - /// Read and consume network latency metrics, resetting their values afterwards. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn take_network_latency_metrics(&self) -> Stats { - self.inner.network_latency_stats.write().take_metrics() - } - - /// Read request payload size metrics across all commands. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn read_req_size_metrics(&self) -> Stats { - self.inner.req_size_stats.read().read_metrics() - } - - /// Read and consume request payload size metrics, resetting their values afterwards. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn take_req_size_metrics(&self) -> Stats { - self.inner.req_size_stats.write().take_metrics() - } - - /// Read response payload size metrics across all commands. 
- #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn read_res_size_metrics(&self) -> Stats { - self.inner.res_size_stats.read().read_metrics() - } - - /// Read and consume response payload size metrics, resetting their values afterwards. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn take_res_size_metrics(&self) -> Stats { - self.inner.res_size_stats.write().take_metrics() - } - - /// Read the number of buffered commands that have not yet been sent to the server. - pub fn command_queue_len(&self) -> usize { - utils::read_atomic(&self.inner.cmd_buffer_len) - } - - /// Listen for reconnection notifications. - /// - /// This function can be used to receive notifications whenever the client successfully reconnects in order to select the right database again, re-subscribe to channels, etc. - /// - /// A reconnection event is also triggered upon first connecting to the server. - pub fn on_reconnect(&self) -> impl Stream { - let (tx, rx) = unbounded_channel(); - self.inner.reconnect_tx.write().push_back(tx); - - UnboundedReceiverStream::new(rx) - } - - /// Listen for protocol and connection errors. This stream can be used to more intelligently handle errors that may - /// not appear in the request-response cycle, and so cannot be handled by response futures. - /// - /// Similar to [on_message](Self::on_message) and [on_reconnect](Self::on_reconnect), this function does not need to be called again if the connection closes. - pub fn on_error(&self) -> impl Stream { - let (tx, rx) = unbounded_channel(); - self.inner.error_tx.write().push_back(tx); - - UnboundedReceiverStream::new(rx) - } - - /// Listen for `(channel, message)` tuples on the publish-subscribe interface. **Keyspace events are not sent on this interface.** - /// - /// If the connection to the Redis server closes for any reason this function does not need to be called again. 
Messages will start appearing on the original stream after [subscribe](Self::subscribe) is called again. - pub fn on_message(&self) -> impl Stream { - let (tx, rx) = unbounded_channel(); - self.inner.message_tx.write().push_back(tx); - - UnboundedReceiverStream::new(rx) - } - - /// Listen for keyspace and keyevent notifications on the publish subscribe interface. - /// - /// Callers still need to configure the server and subscribe to the relevant channels, but this interface will format the messages automatically. - /// - /// If the connection to the Redis server closes for any reason this function does not need to be called again. - /// - /// - pub fn on_keyspace_event(&self) -> impl Stream { - let (tx, rx) = unbounded_channel(); - self.inner.keyspace_tx.write().push_back(tx); - - UnboundedReceiverStream::new(rx) - } - - /// Whether or not the client is using a clustered Redis deployment. - pub fn is_clustered(&self) -> bool { - utils::is_clustered(&self.inner.config) - } - - /// Close the connection to the Redis server. The returned future resolves when the command has been written to the socket, - /// not when the connection has been fully closed. Some time after this future resolves the future returned by [connect](Self::connect) - /// will resolve which indicates that the connection has been fully closed. - /// - /// This function will also close all error, pubsub message, and reconnection event streams. - pub async fn quit(&self) -> Result<(), RedisError> { - commands::server::quit(&self.inner).await - } - - /// Shut down the server and quit the client. - /// - /// - pub async fn shutdown(&self, flags: Option) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::server::shutdown(&self.inner, flags).await - } - - /// Split a clustered Redis client into a list of centralized clients - one for each primary node in the cluster. 
- /// - /// Some Redis commands are not designed to work with hash slots against a clustered deployment. For example, - /// `FLUSHDB`, `PING`, etc all work on one node in the cluster, but no interface exists for the client to - /// select a specific node in the cluster against which to run the command. This function allows the caller to - /// create a list of clients such that each connect to one of the primary nodes in the cluster and functions - /// as if it were operating against a single centralized Redis server. - /// - /// **The clients returned by this function will not be connected to their associated servers. The caller needs to - /// call `connect` on each client before sending any commands.** - /// - /// Note: For this to work reliably this function needs to be called each time nodes are added or removed from the cluster. - pub async fn split_cluster(&self) -> Result, RedisError> { - if utils::is_clustered(&self.inner.config) { - commands::server::split(&self.inner).await - } else { - Err(RedisError::new( - RedisErrorKind::Unknown, - "Client is not using a clustered deployment.", - )) - } - } - - /// Request for authentication in a password-protected Redis server. Returns ok if successful. - /// - /// The client will automatically authenticate with the default user if a password is provided in the associated `RedisConfig` when calling [connect](Self::connect). - /// - /// If running against clustered servers this function will authenticate all connections. - /// - /// - pub async fn auth(&self, username: Option, password: S) -> Result<(), RedisError> - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::server::auth(&self.inner, username, password).await - } - - /// Instruct Redis to start an Append Only File rewrite process. 
- /// - /// - pub async fn bgrewriteaof(&self) -> Result - where - R: RedisResponse, - { - utils::disallow_during_transaction(&self.inner)?; - commands::server::bgrewriteaof(&self.inner).await?.convert() - } - - /// Save the DB in background. - /// - /// - pub async fn bgsave(&self) -> Result - where - R: RedisResponse, - { - utils::disallow_during_transaction(&self.inner)?; - commands::server::bgsave(&self.inner).await?.convert() - } - - /// Return the number of keys in the selected database. - /// - /// - pub async fn dbsize(&self) -> Result - where - R: RedisResponse, - { - commands::server::dbsize(&self.inner).await?.convert() - } - - /// Run a custom command that is not yet supported via another interface on this client. This is most useful when interacting with third party modules or extensions. - /// - /// This interface makes some assumptions about the nature of the provided command: - /// * For commands comprised of multiple command strings they must be separated by a space. - /// * The command string will be sent to the server exactly as written. - /// * Arguments will be sent in the order provided. - /// * When used against a cluster the caller must provide the correct hash slot to identify the cluster - /// node that should receive the command. If one is not provided the command will be sent to a random node - /// in the cluster. - /// - /// Callers should use the re-exported [redis_keyslot](crate::client::util::redis_keyslot) function to hash the command's key, if necessary. - /// - /// Callers that find themselves using this interface for commands that are not a part of a third party extension should file an issue - /// to add the command to the list of supported commands. This interface should be used with caution as it may break the automatic pipeline - /// features in the client if command flags are not properly configured. 
- pub async fn custom(&self, cmd: CustomCommand, args: Vec) -> Result - where - R: RedisResponse, - T: TryInto, - T::Error: Into, - { - commands::server::custom(&self.inner, cmd, utils::try_into_vec(args)?) - .await? - .convert() - } - - /// Subscribe to a channel on the PubSub interface, returning the number of channels to which the client is subscribed. - /// - /// Any messages received before [on_message](Self::on_message) is called will be discarded, so it's usually best to call [on_message](Self::on_message) - /// before calling [subscribe](Self::subscribe) for the first time. - /// - /// - pub async fn subscribe(&self, channel: S) -> Result - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::pubsub::subscribe(&self.inner, channel).await - } - - /// Unsubscribe from a channel on the PubSub interface, returning the number of channels to which hte client is subscribed. - /// - /// - pub async fn unsubscribe(&self, channel: S) -> Result - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::pubsub::unsubscribe(&self.inner, channel).await - } - - /// Subscribes the client to the given patterns. - /// - /// - pub async fn psubscribe(&self, patterns: S) -> Result, RedisError> - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::pubsub::psubscribe(&self.inner, patterns).await - } - - /// Unsubscribes the client from the given patterns, or from all of them if none is given. - /// - /// - pub async fn punsubscribe(&self, patterns: S) -> Result, RedisError> - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::pubsub::punsubscribe(&self.inner, patterns).await - } - - /// Publish a message on the PubSub interface, returning the number of clients that received the message. 
- /// - /// - pub async fn publish(&self, channel: S, message: V) -> Result - where - R: RedisResponse, - S: Into, - V: TryInto, - V::Error: Into, - { - commands::pubsub::publish(&self.inner, channel, to!(message)?) - .await? - .convert() - } - - /// Enter a MULTI block, executing subsequent commands as a transaction. - /// - /// - /// - /// The `abort_on_error` flag indicates whether the client should automatically abort the transaction when an error is received from a command within the transaction. - /// - /// See for more information. If this flag is `false` then the caller will need to `exec` or `discard` - /// the transaction before either retrying or moving on to new commands outside the transaction. - pub async fn multi(&self, abort_on_error: bool) -> Result { - if utils::is_clustered(&self.inner.config) { - let policy = MultiPolicy { - hash_slot: None, - abort_on_error, - sent_multi: false, - }; - - if !utils::check_and_set_none(&self.inner.multi_block, policy) { - return Err(RedisError::new( - RedisErrorKind::InvalidCommand, - "Client is already within a MULTI transaction.", - )); - } - - debug!("{}: Defer MULTI command until hash slot is specified.", self.inner.id); - Ok(TransactionClient::from(&self.inner)) - } else { - let policy = MultiPolicy { - hash_slot: None, - abort_on_error, - sent_multi: true, - }; - if !utils::check_and_set_none(&self.inner.multi_block, policy) { - return Err(RedisError::new( - RedisErrorKind::InvalidCommand, - "Client is already within a MULTI transaction.", - )); - } - - commands::server::multi(&self.inner) - .await - .map(|_| TransactionClient::from(&self.inner)) - } - } - - /// Whether or not the client is currently in the middle of a MULTI transaction. - pub fn in_transaction(&self) -> bool { - utils::is_locked_some(&self.inner.multi_block) - } - - /// Force the client to abort any in-flight transactions. 
- /// - /// The `Drop` trait on the [TransactionClient] is not async and so callers that accidentally drop the transaction - /// client associated with a MULTI block before calling EXEC or DISCARD can use this function to exit the transaction. - /// A warning log line will be emitted if the transaction client is dropped before calling EXEC or DISCARD. - pub async fn force_discard_transaction(&self) -> Result<(), RedisError> { - commands::server::discard(&self.inner).await - } - - /// Marks the given keys to be watched for conditional execution of a transaction. - /// - /// - pub async fn watch(&self, keys: K) -> Result<(), RedisError> - where - K: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::keys::watch(&self.inner, keys).await - } - - /// Flushes all the previously watched keys for a transaction. - /// - /// - pub async fn unwatch(&self) -> Result<(), RedisError> { - commands::keys::unwatch(&self.inner).await - } - - /// Delete the keys in all databases. - /// - /// - pub async fn flushall(&self, r#async: bool) -> Result - where - R: RedisResponse, - { - commands::server::flushall(&self.inner, r#async).await?.convert() - } - - /// Delete the keys on all nodes in the cluster. This is a special function that does not map directly to the Redis interface. - /// - /// Note: ASYNC flushing of the db behaves badly with the automatic pipelining features of this library. If async flushing of the entire cluster - /// is a requirement then callers should use [split_cluster](Self::split_cluster) with [flushall](Self::flushall) on each client instead. - pub async fn flushall_cluster(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::server::flushall_cluster(&self.inner).await - } - - /// Ping the Redis server. - /// - /// - pub async fn ping(&self) -> Result<(), RedisError> { - commands::server::ping(&self.inner).await?.convert() - } - - /// Select the database this client should use. 
- /// - /// - pub async fn select(&self, db: u8) -> Result<(), RedisError> { - commands::server::select(&self.inner, db).await?.convert() - } - - /// Read info about the Redis server. - /// - /// - pub async fn info(&self, section: Option) -> Result - where - R: RedisResponse, - { - commands::server::info(&self.inner, section).await?.convert() - } - - /// This command will start a coordinated failover between the currently-connected-to master and one of its replicas. - /// - /// - pub async fn failover( - &self, - to: Option<(String, u16)>, - force: bool, - abort: bool, - timeout: Option, - ) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::server::failover(&self.inner, to, force, abort, timeout).await - } - - /// Return the UNIX TIME of the last DB save executed with success. - /// - /// - pub async fn lastsave(&self) -> Result - where - R: RedisResponse, - { - commands::server::lastsave(&self.inner).await?.convert() - } - - // ------------- SLOWLOG ---------------- - - /// This command is used in order to read the slow queries log. - /// - /// - pub async fn slowlog_get(&self, count: Option) -> Result, RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::slowlog::slowlog_get(&self.inner, count).await - } - - /// This command is used in order to read length of the slow queries log. - /// - /// - pub async fn slowlog_length(&self) -> Result { - utils::disallow_during_transaction(&self.inner)?; - commands::slowlog::slowlog_length(&self.inner).await - } - - /// This command is used to reset the slow queries log. - /// - /// - pub async fn slowlog_reset(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::slowlog::slowlog_reset(&self.inner).await - } - - // ------------- CLIENT --------------- - - /// Return the ID of the current connection. - /// - /// Note: Against a clustered deployment this will return the ID of a random connection. 
See [connection_ids](Self::connection_ids) for more information. - /// - /// - pub async fn client_id(&self) -> Result - where - R: RedisResponse, - { - commands::client::client_id(&self.inner).await?.convert() - } - - /// Read the connection IDs for the active connections to each server. - /// - /// The returned map contains each server's `host:port` and the result of calling `CLIENT ID` on the connection. - /// - /// Note: despite being async this function will usually return cached information from the client if possible. - pub async fn connection_ids(&self) -> Result, i64>, RedisError> { - utils::read_connection_ids(&self.inner).await.ok_or(RedisError::new( - RedisErrorKind::Unknown, - "Failed to read connection IDs", - )) - } - - /// Update the client's sentinel nodes list if using the sentinel interface. - /// - /// The client will automatically update this when connections to the primary server close. - pub async fn update_sentinel_nodes(&self) -> Result<(), RedisError> { - utils::update_sentinel_nodes(&self.inner).await - } - - /// The command returns information and statistics about the current client connection in a mostly human readable format. - /// - /// - pub async fn client_info(&self) -> Result - where - R: RedisResponse, - { - commands::client::client_info(&self.inner).await?.convert() - } - - /// Close a given connection or set of connections. - /// - /// - pub async fn client_kill(&self, filters: Vec) -> Result - where - R: RedisResponse, - { - commands::client::client_kill(&self.inner, filters).await?.convert() - } - - /// The CLIENT LIST command returns information and statistics about the client connections server in a mostly human readable format. 
- /// - /// - pub async fn client_list(&self, r#type: Option, ids: Option>) -> Result - where - R: RedisResponse, - I: Into, - { - commands::client::client_list(&self.inner, r#type, ids).await?.convert() - } - - /// The CLIENT GETNAME returns the name of the current connection as set by CLIENT SETNAME. - /// - /// - pub async fn client_getname(&self) -> Result - where - R: RedisResponse, - { - commands::client::client_getname(&self.inner).await?.convert() - } - - /// Assign a name to the current connection. - /// - /// **Note: The client automatically generates a unique name for each client that is shared by all underlying connections. - /// Use [Self::id] to read the automatically generated name.** - /// - /// - pub async fn client_setname(&self, name: S) -> Result<(), RedisError> - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::client::client_setname(&self.inner, name).await - } - - /// CLIENT PAUSE is a connections control command able to suspend all the Redis clients for the specified amount of time (in milliseconds). - /// - /// - pub async fn client_pause(&self, timeout: i64, mode: Option) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::client::client_pause(&self.inner, timeout, mode).await - } - - /// CLIENT UNPAUSE is used to resume command processing for all clients that were paused by CLIENT PAUSE. - /// - /// - pub async fn client_unpause(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::client::client_unpause(&self.inner).await - } - - /// The CLIENT REPLY command controls whether the server will reply the client's commands. 
The following modes are available: - /// - /// - pub async fn client_reply(&self, flag: ClientReplyFlag) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::client::client_reply(&self.inner, flag).await - } - - /// This command can unblock, from a different connection, a client blocked in a blocking operation, such as for instance BRPOP or XREAD or WAIT. - /// - /// Note: this command is sent on a backchannel connection and will work even when the main connection is blocked. - /// - /// - pub async fn client_unblock(&self, id: S, flag: Option) -> Result - where - R: RedisResponse, - S: Into, - { - commands::client::client_unblock(&self.inner, id, flag).await?.convert() - } - - /// A convenience function to unblock any blocked connection on this client. - pub async fn unblock_self(&self, flag: Option) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::client::unblock_self(&self.inner, flag).await - } - - // ------------- CLUSTER ----------- - - /// Advances the cluster config epoch. - /// - /// - pub async fn cluster_bumpepoch(&self) -> Result - where - R: RedisResponse, - { - commands::cluster::cluster_bumpepoch(&self.inner).await?.convert() - } - - /// Deletes all slots from a node. - /// - /// - pub async fn cluster_flushslots(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_flushslots(&self.inner).await - } - - /// Returns the node's id. - /// - /// - pub async fn cluster_myid(&self) -> Result - where - R: RedisResponse, - { - commands::cluster::cluster_myid(&self.inner).await?.convert() - } - - /// Read the current cluster node configuration. - /// - /// Note: The client keeps a cached, parsed version of the cluster state in memory available at [cached_cluster_state](Self::cached_cluster_state). 
- /// - /// - pub async fn cluster_nodes(&self) -> Result { - commands::cluster::cluster_nodes(&self.inner).await?.convert() - } - - /// Forces a node to save the nodes.conf configuration on disk. - /// - /// - pub async fn cluster_saveconfig(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_saveconfig(&self.inner).await - } - - /// CLUSTER SLOTS returns details about which cluster slots map to which Redis instances. - /// - /// - pub async fn cluster_slots(&self) -> Result { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_slots(&self.inner).await - } - - /// CLUSTER INFO provides INFO style information about Redis Cluster vital parameters. - /// - /// - pub async fn cluster_info(&self) -> Result { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_info(&self.inner).await - } - - /// This command is useful in order to modify a node's view of the cluster configuration. Specifically it assigns a set of hash slots to the node receiving the command. - /// - /// - pub async fn cluster_add_slots(&self, slots: S) -> Result<(), RedisError> - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_add_slots(&self.inner, slots).await - } - - /// The command returns the number of failure reports for the specified node. - /// - /// - pub async fn cluster_count_failure_reports(&self, node_id: S) -> Result - where - R: RedisResponse, - S: Into, - { - commands::cluster::cluster_count_failure_reports(&self.inner, node_id) - .await? - .convert() - } - - /// Returns the number of keys in the specified Redis Cluster hash slot. - /// - /// - pub async fn cluster_count_keys_in_slot(&self, slot: u16) -> Result - where - R: RedisResponse, - { - commands::cluster::cluster_count_keys_in_slot(&self.inner, slot) - .await? 
- .convert() - } - - /// The CLUSTER DELSLOTS command asks a particular Redis Cluster node to forget which master is serving the hash slots specified as arguments. - /// - /// - pub async fn cluster_del_slots(&self, slots: S) -> Result<(), RedisError> - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_del_slots(&self.inner, slots).await - } - - /// This command, that can only be sent to a Redis Cluster replica node, forces the replica to start a manual failover of its master instance. - /// - /// - pub async fn cluster_failover(&self, flag: Option) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_failover(&self.inner, flag).await - } - - /// The command is used in order to remove a node, specified via its node ID, from the set of known nodes of the Redis Cluster node receiving the command. - /// In other words the specified node is removed from the nodes table of the node receiving the command. - /// - /// - pub async fn cluster_forget(&self, node_id: S) -> Result<(), RedisError> - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_forget(&self.inner, node_id).await - } - - /// The command returns an array of keys names stored in the contacted node and hashing to the specified hash slot. - /// - /// - pub async fn cluster_get_keys_in_slot(&self, slot: u16, count: u64) -> Result - where - R: RedisResponse, - { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_get_keys_in_slot(&self.inner, slot, count) - .await? - .convert() - } - - /// Returns an integer identifying the hash slot the specified key hashes to. 
- /// - /// - pub async fn cluster_keyslot(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::cluster::cluster_keyslot(&self.inner, key).await?.convert() - } - - /// CLUSTER MEET is used in order to connect different Redis nodes with cluster support enabled, into a working cluster. - /// - /// - pub async fn cluster_meet(&self, ip: S, port: u16) -> Result<(), RedisError> - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_meet(&self.inner, ip, port).await - } - - /// The command reconfigures a node as a replica of the specified master. If the node receiving the command is an empty master, as - /// a side effect of the command, the node role is changed from master to replica. - /// - /// - pub async fn cluster_replicate(&self, node_id: S) -> Result<(), RedisError> - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_replicate(&self.inner, node_id).await - } - - /// The command provides a list of replica nodes replicating from the specified master node. - /// - /// - pub async fn cluster_replicas(&self, node_id: S) -> Result - where - S: Into, - { - commands::cluster::cluster_replicas(&self.inner, node_id) - .await? - .convert() - } - - /// Reset a Redis Cluster node, in a more or less drastic way depending on the reset type, that can be hard or soft. Note that - /// this command does not work for masters if they hold one or more keys, in that case to completely reset a master node keys - /// must be removed first, e.g. by using FLUSHALL first, and then CLUSTER RESET. - /// - /// - pub async fn cluster_reset(&self, mode: Option) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_reset(&self.inner, mode).await - } - - /// This command sets a specific config epoch in a fresh node. 
- /// - /// - pub async fn cluster_set_config_epoch(&self, epoch: u64) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_set_config_epoch(&self.inner, epoch).await - } - - /// CLUSTER SETSLOT is responsible of changing the state of a hash slot in the receiving node in different ways. - /// - /// - pub async fn cluster_setslot(&self, slot: u16, state: ClusterSetSlotState) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::cluster::cluster_setslot(&self.inner, slot, state).await - } - - // -------------- CONFIG --------------- - - /// Resets the statistics reported by Redis using the INFO command. - /// - /// - pub async fn config_resetstat(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::config::config_resetstat(&self.inner).await - } - - /// The CONFIG REWRITE command rewrites the redis.conf file the server was started with, applying the minimal changes needed to make it - /// reflect the configuration currently used by the server, which may be different compared to the original one because of the use of - /// the CONFIG SET command. - /// - /// - pub async fn config_rewrite(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::config::config_rewrite(&self.inner).await - } - - /// The CONFIG GET command is used to read the configuration parameters of a running Redis server. - /// - /// - pub async fn config_get(&self, parameter: S) -> Result - where - R: RedisResponse, - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::config::config_get(&self.inner, parameter).await?.convert() - } - - /// The CONFIG SET command is used in order to reconfigure the server at run time without the need to restart Redis. 
- /// - /// - pub async fn config_set(&self, parameter: P, value: V) -> Result<(), RedisError> - where - P: Into, - V: TryInto, - V::Error: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::config::config_set(&self.inner, parameter, to!(value)?).await - } - - // ---------------- MEMORY -------------------- - - /// The MEMORY DOCTOR command reports about different memory-related issues that the Redis server experiences, and advises about possible remedies. - /// - /// - pub async fn memory_doctor(&self) -> Result { - utils::disallow_during_transaction(&self.inner)?; - commands::memory::memory_doctor(&self.inner).await - } - - /// The MEMORY MALLOC-STATS command provides an internal statistics report from the memory allocator. - /// - /// - pub async fn memory_malloc_stats(&self) -> Result { - utils::disallow_during_transaction(&self.inner)?; - commands::memory::memory_malloc_stats(&self.inner).await - } - - /// The MEMORY PURGE command attempts to purge dirty pages so these can be reclaimed by the allocator. - /// - /// - pub async fn memory_purge(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::memory::memory_purge(&self.inner).await - } - - /// The MEMORY STATS command returns an Array reply about the memory usage of the server. - /// - /// - pub async fn memory_stats(&self) -> Result { - utils::disallow_during_transaction(&self.inner)?; - commands::memory::memory_stats(&self.inner).await - } - - /// The MEMORY USAGE command reports the number of bytes that a key and its value require to be stored in RAM. 
- /// - /// - pub async fn memory_usage(&self, key: K, samples: Option) -> Result, RedisError> - where - K: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::memory::memory_usage(&self.inner, key, samples).await - } - - // ---------------- ACL ------------------------ - - /// When Redis is configured to use an ACL file (with the aclfile configuration option), this command will reload the - /// ACLs from the file, replacing all the current ACL rules with the ones defined in the file. - /// - /// - pub async fn acl_load(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::acl::acl_load(&self.inner).await - } - - /// When Redis is configured to use an ACL file (with the aclfile configuration option), this command will save the - /// currently defined ACLs from the server memory to the ACL file. - /// - /// - pub async fn acl_save(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::acl::acl_save(&self.inner).await - } - - /// The command shows the currently active ACL rules in the Redis server. - /// - /// - pub async fn acl_list(&self) -> Result - where - R: RedisResponse, - { - utils::disallow_during_transaction(&self.inner)?; - commands::acl::acl_list(&self.inner).await?.convert() - } - - /// The command shows a list of all the usernames of the currently configured users in the Redis ACL system. - /// - /// - pub async fn acl_users(&self) -> Result - where - R: RedisResponse, - { - utils::disallow_during_transaction(&self.inner)?; - commands::acl::acl_users(&self.inner).await?.convert() - } - - /// The command returns all the rules defined for an existing ACL user. 
- /// - /// - pub async fn acl_getuser(&self, username: S) -> Result, RedisError> - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::acl::acl_getuser(&self.inner, username).await - } - - /// Create an ACL user with the specified rules or modify the rules of an existing user. - /// - /// - pub async fn acl_setuser(&self, username: S, rules: Vec) -> Result<(), RedisError> - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::acl::acl_setuser(&self.inner, username, rules).await - } - - /// Delete all the specified ACL users and terminate all the connections that are authenticated with such users. - /// - /// - pub async fn acl_deluser(&self, usernames: S) -> Result - where - R: RedisResponse, - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::acl::acl_deluser(&self.inner, usernames).await?.convert() - } - - /// The command shows the available ACL categories if called without arguments. If a category name is given, - /// the command shows all the Redis commands in the specified category. - /// - /// - pub async fn acl_cat(&self, category: Option) -> Result, RedisError> - where - S: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::acl::acl_cat(&self.inner, category).await?.convert() - } - - /// Generate a password with length `bits`, returning the password. - pub async fn acl_genpass(&self, bits: Option) -> Result { - utils::disallow_during_transaction(&self.inner)?; - commands::acl::acl_genpass(&self.inner, bits).await?.convert() - } - - /// Return the username the current connection is authenticated with. New connections are authenticated - /// with the "default" user. - /// - /// - pub async fn acl_whoami(&self) -> Result { - utils::disallow_during_transaction(&self.inner)?; - commands::acl::acl_whoami(&self.inner).await?.convert() - } - - /// Read `count` recent ACL security events. 
- /// - /// - pub async fn acl_log_count(&self, count: Option) -> Result { - utils::disallow_during_transaction(&self.inner)?; - commands::acl::acl_log_count(&self.inner, count).await - } - - /// Clear the ACL security events logs. - /// - /// - pub async fn acl_log_reset(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::acl::acl_log_reset(&self.inner).await - } - - // ----------- KEYS ------------ - - /// Return a random key from the currently selected database. - /// - /// - pub async fn randomkey(&self) -> Result - where - R: RedisResponse, - { - commands::keys::randomkey(&self.inner).await?.convert() - } - - /// This command copies the value stored at the source key to the destination key. - /// - /// - pub async fn copy(&self, source: S, destination: D, db: Option, replace: bool) -> Result - where - R: RedisResponse, - S: Into, - D: Into, - { - commands::keys::copy(&self.inner, source, destination, db, replace) - .await? - .convert() - } - - /// Serialize the value stored at `key` in a Redis-specific format and return it as bulk string. - /// - /// - pub async fn dump(&self, key: K) -> Result - where - K: Into, - { - commands::keys::dump(&self.inner, key).await - } - - /// Create a key associated with a value that is obtained by deserializing the provided serialized value - /// - /// - pub async fn restore( - &self, - key: K, - ttl: i64, - serialized: RedisValue, - replace: bool, - absttl: bool, - idletime: Option, - frequency: Option, - ) -> Result - where - K: Into, - { - commands::keys::restore(&self.inner, key, ttl, serialized, replace, absttl, idletime, frequency).await - } - - /// Set a value with optional NX|XX, EX|PX|EXAT|PXAT|KEEPTTL, and GET arguments. 
- /// - /// - pub async fn set( - &self, - key: K, - value: V, - expire: Option, - options: Option, - get: bool, - ) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - let (key, value) = (key.into(), to!(value)?); - commands::keys::set(&self.inner, key, value, expire, options, get) - .await? - .convert() - } - - /// Read a value from the server. - /// - /// - pub async fn get(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::get(&self.inner, key).await?.convert() - } - - /// Returns the substring of the string value stored at `key` with offsets `start` and `end` (both inclusive). - /// - /// Note: Command formerly called SUBSTR in Redis verison <=2.0. - /// - /// - pub async fn getrange(&self, key: K, start: usize, end: usize) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::getrange(&self.inner, key, start, end).await?.convert() - } - - /// Overwrites part of the string stored at `key`, starting at the specified `offset`, for the entire length of `value`. - /// - /// - pub async fn setrange(&self, key: K, offset: u32, value: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::keys::setrange(&self.inner, key, offset, to!(value)?) - .await? - .convert() - } - - /// Atomically sets `key` to `value` and returns the old value stored at `key`. - /// - /// Returns an error if `key` does not hold string value. Returns nil if `key` does not exist. - /// - /// - pub async fn getset(&self, key: K, value: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::keys::getset(&self.inner, key, to!(value)?).await?.convert() - } - - /// Get the value of key and delete the key. This command is similar to GET, except for the fact that it also deletes the key on success (if and only if the key's value type is a string). 
- /// - /// - pub async fn getdel(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::getdel(&self.inner, key).await?.convert() - } - - /// Returns the length of the string value stored at key. An error is returned when key holds a non-string value. - /// - /// - pub async fn strlen(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::strlen(&self.inner, key).await?.convert() - } - - /// Removes the specified keys. A key is ignored if it does not exist. - /// - /// Returns the number of keys removed. - /// - /// - pub async fn del(&self, keys: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::del(&self.inner, keys).await?.convert() - } - - /// Returns the values of all specified keys. For every key that does not hold a string value or does not exist, the special value nil is returned. - /// - /// - pub async fn mget(&self, keys: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::mget(&self.inner, keys).await?.convert() - } - - /// Sets the given keys to their respective values. - /// - /// - pub async fn mset(&self, values: V) -> Result - where - V: Into, - { - commands::keys::mset(&self.inner, values).await - } - - /// Sets the given keys to their respective values. MSETNX will not perform any operation at all even if just a single key already exists. - /// - /// - pub async fn msetnx(&self, values: V) -> Result - where - R: RedisResponse, - V: Into, - { - commands::keys::msetnx(&self.inner, values).await?.convert() - } - - /// Increments the number stored at `key` by one. If the key does not exist, it is set to 0 before performing the operation. - /// - /// Returns an error if the value at key is of the wrong type. - /// - /// - pub async fn incr(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::incr(&self.inner, key).await?.convert() - } - - /// Increments the number stored at `key` by `val`. 
If the key does not exist, it is set to 0 before performing the operation. - /// - /// Returns an error if the value at key is of the wrong type. - /// - /// - pub async fn incr_by(&self, key: K, val: i64) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::incr_by(&self.inner, key, val).await?.convert() - } - - /// Increment the string representing a floating point number stored at key by `val`. If the key does not exist, it is set to 0 before performing the operation. - /// - /// Returns an error if key value is the wrong type or if the current value cannot be parsed as a floating point value. - /// - /// - pub async fn incr_by_float(&self, key: K, val: f64) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::incr_by_float(&self.inner, key, val).await?.convert() - } - - /// Decrements the number stored at `key` by one. If the key does not exist, it is set to 0 before performing the operation. - /// - /// Returns an error if the key contains a value of the wrong type. - /// - /// - pub async fn decr(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::decr(&self.inner, key).await?.convert() - } - - /// Decrements the number stored at `key` by `val`. If the key does not exist, it is set to 0 before performing the operation. - /// - /// Returns an error if the key contains a value of the wrong type. - /// - /// - pub async fn decr_by(&self, key: K, val: i64) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::decr_by(&self.inner, key, val).await?.convert() - } - - /// Returns the remaining time to live of a key that has a timeout, in seconds. - /// - /// - pub async fn ttl(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::ttl(&self.inner, key).await?.convert() - } - - /// Returns the remaining time to live of a key that has a timeout, in milliseconds. 
- /// - /// - pub async fn pttl(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::pttl(&self.inner, key).await?.convert() - } - - /// Remove the existing timeout on a key, turning the key from volatile (a key with an expiration) - /// to persistent (a key that will never expire as no timeout is associated). - /// - /// Returns a boolean value describing whether or not the timeout was removed. - /// - /// - pub async fn persist(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::persist(&self.inner, key).await?.convert() - } - - /// Set a timeout on key. After the timeout has expired, the key will be automatically deleted. - /// - /// Returns a boolean value describing whether or not the timeout was added. - /// - /// - pub async fn expire(&self, key: K, seconds: i64) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::expire(&self.inner, key, seconds).await?.convert() - } - - /// Set a timeout on a key based on a UNIX timestamp. - /// - /// Returns a boolean value describing whether or not the timeout was added. - /// - /// - pub async fn expire_at(&self, key: K, timestamp: i64) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::expire_at(&self.inner, key, timestamp).await?.convert() - } - - /// Returns number of keys that exist from the `keys` arguments. - /// - /// - pub async fn exists(&self, keys: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::keys::exists(&self.inner, keys).await?.convert() - } - - // ----------- HASHES ------------------ - - /// Removes the specified fields from the hash stored at `key`. - /// - /// - pub async fn hdel(&self, key: K, fields: F) -> Result - where - R: RedisResponse, - K: Into, - F: Into, - { - commands::hashes::hdel(&self.inner, key, fields).await?.convert() - } - - /// Returns if `field` is an existing field in the hash stored at `key`. 
- /// - /// - pub async fn hexists(&self, key: K, field: F) -> Result - where - R: RedisResponse, - K: Into, - F: Into, - { - commands::hashes::hexists(&self.inner, key, field).await?.convert() - } - - /// Returns the value associated with `field` in the hash stored at `key`. - /// - /// - pub async fn hget(&self, key: K, field: F) -> Result - where - R: RedisResponse, - K: Into, - F: Into, - { - commands::hashes::hget(&self.inner, key, field).await?.convert() - } - - /// Returns all fields and values of the hash stored at `key`. - /// - /// - pub async fn hgetall(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::hashes::hgetall(&self.inner, key).await?.convert() - } - - /// Increments the number stored at `field` in the hash stored at `key` by `increment`. - /// - /// - pub async fn hincrby(&self, key: K, field: F, increment: i64) -> Result - where - R: RedisResponse, - K: Into, - F: Into, - { - commands::hashes::hincrby(&self.inner, key, field, increment) - .await? - .convert() - } - - /// Increment the specified `field` of a hash stored at `key`, and representing a floating point number, by the specified `increment`. - /// - /// - pub async fn hincrbyfloat(&self, key: K, field: F, increment: f64) -> Result - where - R: RedisResponse, - K: Into, - F: Into, - { - commands::hashes::hincrbyfloat(&self.inner, key, field, increment) - .await? - .convert() - } - - /// Returns all field names in the hash stored at `key`. - /// - /// - pub async fn hkeys(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::hashes::hkeys(&self.inner, key).await?.convert() - } - - /// Returns the number of fields contained in the hash stored at `key`. - /// - /// - pub async fn hlen(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::hashes::hlen(&self.inner, key).await?.convert() - } - - /// Returns the values associated with the specified `fields` in the hash stored at `key`. 
- /// - /// - pub async fn hmget(&self, key: K, fields: F) -> Result - where - R: RedisResponse, - K: Into, - F: Into, - { - commands::hashes::hmget(&self.inner, key, fields).await?.convert() - } - - /// Sets the specified fields to their respective values in the hash stored at `key`. - /// - /// - pub async fn hmset(&self, key: K, values: V) -> Result - where - R: RedisResponse, - K: Into, - V: Into, - { - commands::hashes::hmset(&self.inner, key, values).await?.convert() - } - - /// Sets fields in the hash stored at `key` to their provided values. - /// - /// - pub async fn hset(&self, key: K, values: V) -> Result - where - R: RedisResponse, - K: Into, - V: Into, - { - commands::hashes::hset(&self.inner, key, values).await?.convert() - } - - /// Sets `field` in the hash stored at `key` to `value`, only if `field` does not yet exist. - /// - /// - pub async fn hsetnx(&self, key: K, field: F, value: V) -> Result - where - R: RedisResponse, - K: Into, - F: Into, - V: TryInto, - V::Error: Into, - { - commands::hashes::hsetnx(&self.inner, key, field, to!(value)?) - .await? - .convert() - } - - /// When called with just the `key` argument, return a random field from the hash value stored at `key`. - /// - /// If the provided `count` argument is positive, return an array of distinct fields. - /// - /// - pub async fn hrandfield(&self, key: K, count: Option<(i64, bool)>) -> Result - where - R: RedisResponse, - K: Into, - { - commands::hashes::hrandfield(&self.inner, key, count).await?.convert() - } - - /// Returns the string length of the value associated with `field` in the hash stored at `key`. - /// - /// - pub async fn hstrlen(&self, key: K, field: F) -> Result - where - R: RedisResponse, - K: Into, - F: Into, - { - commands::hashes::hstrlen(&self.inner, key, field).await?.convert() - } - - /// Returns all values in the hash stored at `key`. 
- /// - /// - pub async fn hvals(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::hashes::hvals(&self.inner, key).await?.convert() - } - - // ------------- SETS -------------------- - - /// Add the specified members to the set stored at `key`. - /// - /// - pub async fn sadd(&self, key: K, members: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::sets::sadd(&self.inner, key, to!(members)?).await?.convert() - } - - /// Returns the set cardinality (number of elements) of the set stored at `key`. - /// - /// - pub async fn scard(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sets::scard(&self.inner, key).await?.convert() - } - - /// Returns the members of the set resulting from the difference between the first set and all the successive sets. - /// - /// - pub async fn sdiff(&self, keys: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sets::sdiff(&self.inner, keys).await?.convert() - } - - /// This command is equal to SDIFF, but instead of returning the resulting set, it is stored in `destination`. - /// - /// - pub async fn sdiffstore(&self, dest: D, keys: K) -> Result - where - R: RedisResponse, - D: Into, - K: Into, - { - commands::sets::sdiffstore(&self.inner, dest, keys).await?.convert() - } - - /// Returns the members of the set resulting from the intersection of all the given sets. - /// - /// - pub async fn sinter(&self, keys: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sets::sinter(&self.inner, keys).await?.convert() - } - - /// This command is equal to SINTER, but instead of returning the resulting set, it is stored in `destination`. - /// - /// - pub async fn sinterstore(&self, dest: D, keys: K) -> Result - where - R: RedisResponse, - D: Into, - K: Into, - { - commands::sets::sinterstore(&self.inner, dest, keys).await?.convert() - } - - /// Returns if `member` is a member of the set stored at `key`. 
- /// - /// - pub async fn sismember(&self, key: K, member: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::sets::sismember(&self.inner, key, to!(member)?) - .await? - .convert() - } - - /// Returns whether each member is a member of the set stored at `key`. - /// - /// - pub async fn smismember(&self, key: K, members: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::sets::smismember(&self.inner, key, to!(members)?) - .await? - .convert() - } - - /// Returns all the members of the set value stored at `key`. - /// - /// - pub async fn smembers(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sets::smembers(&self.inner, key).await?.convert() - } - - /// Move `member` from the set at `source` to the set at `destination`. - /// - /// - pub async fn smove(&self, source: S, dest: D, member: V) -> Result - where - R: RedisResponse, - S: Into, - D: Into, - V: TryInto, - V::Error: Into, - { - commands::sets::smove(&self.inner, source, dest, to!(member)?) - .await? - .convert() - } - - /// Removes and returns one or more random members from the set value store at `key`. - /// - /// - pub async fn spop(&self, key: K, count: Option) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sets::spop(&self.inner, key, count).await?.convert() - } - - /// When called with just the key argument, return a random element from the set value stored at `key`. - /// - /// If the provided `count` argument is positive, return an array of distinct elements. The array's length is either count or the set's cardinality (SCARD), whichever is lower. - /// - /// - pub async fn srandmember(&self, key: K, count: Option) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sets::srandmember(&self.inner, key, count).await?.convert() - } - - /// Remove the specified members from the set stored at `key`. 
- /// - /// - pub async fn srem(&self, key: K, members: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::sets::srem(&self.inner, key, to!(members)?).await?.convert() - } - - /// Returns the members of the set resulting from the union of all the given sets. - /// - /// - pub async fn sunion(&self, keys: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sets::sunion(&self.inner, keys).await?.convert() - } - - /// This command is equal to SUNION, but instead of returning the resulting set, it is stored in `destination`. - /// - /// - pub async fn sunionstore(&self, dest: D, keys: K) -> Result - where - R: RedisResponse, - D: Into, - K: Into, - { - commands::sets::sunionstore(&self.inner, dest, keys).await?.convert() - } - - // ------------- SORTED SETS --------------- - - /// The blocking variant of the ZPOPMIN command. - /// - /// - pub async fn bzpopmin(&self, keys: K, timeout: f64) -> Result, RedisError> - where - K: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::sorted_sets::bzpopmin(&self.inner, keys, timeout).await - } - - /// The blocking variant of the ZPOPMAX command. - /// - /// - pub async fn bzpopmax(&self, keys: K, timeout: f64) -> Result, RedisError> - where - K: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::sorted_sets::bzpopmax(&self.inner, keys, timeout).await - } - - /// Adds all the specified members with the specified scores to the sorted set stored at `key`. - /// - /// - pub async fn zadd( - &self, - key: K, - options: Option, - ordering: Option, - changed: bool, - incr: bool, - values: V, - ) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::sorted_sets::zadd(&self.inner, key, options, ordering, changed, incr, to!(values)?) - .await? - .convert() - } - - /// Returns the sorted set cardinality (number of elements) of the sorted set stored at `key`. 
- /// - /// - pub async fn zcard(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sorted_sets::zcard(&self.inner, key).await?.convert() - } - - /// Returns the number of elements in the sorted set at `key` with a score between `min` and `max`. - /// - /// - pub async fn zcount(&self, key: K, min: f64, max: f64) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sorted_sets::zcount(&self.inner, key, min, max) - .await? - .convert() - } - - /// This command is similar to ZDIFFSTORE, but instead of storing the resulting sorted set, it is returned to the client. - /// - /// - pub async fn zdiff(&self, keys: K, withscores: bool) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sorted_sets::zdiff(&self.inner, keys, withscores) - .await? - .convert() - } - - /// Computes the difference between the first and all successive input sorted sets and stores the result in `destination`. - /// - /// - pub async fn zdiffstore(&self, dest: D, keys: K) -> Result - where - R: RedisResponse, - D: Into, - K: Into, - { - commands::sorted_sets::zdiffstore(&self.inner, dest, keys) - .await? - .convert() - } - - /// Increments the score of `member` in the sorted set stored at `key` by `increment`. - /// - /// - pub async fn zincrby(&self, key: K, increment: f64, member: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::sorted_sets::zincrby(&self.inner, key, increment, to!(member)?) - .await? - .convert() - } - - /// This command is similar to ZINTERSTORE, but instead of storing the resulting sorted set, it is returned to the client. - /// - /// - pub async fn zinter( - &self, - keys: K, - weights: W, - aggregate: Option, - withscores: bool, - ) -> Result - where - R: RedisResponse, - K: Into, - W: Into, - { - commands::sorted_sets::zinter(&self.inner, keys, weights, aggregate, withscores) - .await? 
- .convert() - } - - /// Computes the intersection of the sorted sets given by the specified keys, and stores the result in `destination`. - /// - /// - pub async fn zinterstore( - &self, - dest: D, - keys: K, - weights: W, - aggregate: Option, - ) -> Result - where - R: RedisResponse, - D: Into, - K: Into, - W: Into, - { - commands::sorted_sets::zinterstore(&self.inner, dest, keys, weights, aggregate) - .await? - .convert() - } - - /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, - /// this command returns the number of elements in the sorted set at key with a value between min and max. - /// - /// - pub async fn zlexcount(&self, key: K, min: M, max: N) -> Result - where - R: RedisResponse, - K: Into, - M: TryInto, - M::Error: Into, - N: TryInto, - N::Error: Into, - { - commands::sorted_sets::zlexcount(&self.inner, key, to!(min)?, to!(max)?) - .await? - .convert() - } - - /// Removes and returns up to count members with the highest scores in the sorted set stored at `key`. - /// - /// - pub async fn zpopmax(&self, key: K, count: Option) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sorted_sets::zpopmax(&self.inner, key, count).await?.convert() - } - - /// Removes and returns up to count members with the lowest scores in the sorted set stored at `key`. - /// - /// - pub async fn zpopmin(&self, key: K, count: Option) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sorted_sets::zpopmin(&self.inner, key, count).await?.convert() - } - - /// When called with just the key argument, return a random element from the sorted set value stored at `key`. - /// - /// - pub async fn zrandmember(&self, key: K, count: Option<(i64, bool)>) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sorted_sets::zrandmember(&self.inner, key, count) - .await? - .convert() - } - - /// This command is like ZRANGE, but stores the result in the `destination` key. 
- /// - /// - pub async fn zrangestore( - &self, - dest: D, - source: S, - min: M, - max: N, - sort: Option, - rev: bool, - limit: Option, - ) -> Result - where - R: RedisResponse, - D: Into, - S: Into, - M: TryInto, - M::Error: Into, - N: TryInto, - N::Error: Into, - { - commands::sorted_sets::zrangestore(&self.inner, dest, source, to!(min)?, to!(max)?, sort, rev, limit) - .await? - .convert() - } - - /// Returns the specified range of elements in the sorted set stored at `key`. - /// - /// - pub async fn zrange( - &self, - key: K, - min: M, - max: N, - sort: Option, - rev: bool, - limit: Option, - withscores: bool, - ) -> Result - where - K: Into, - M: TryInto, - M::Error: Into, - N: TryInto, - N::Error: Into, - { - commands::sorted_sets::zrange(&self.inner, key, to!(min)?, to!(max)?, sort, rev, limit, withscores).await - } - - /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical - /// ordering, this command returns all the elements in the sorted set at `key` with a value between `min` and `max`. - /// - /// - pub async fn zrangebylex( - &self, - key: K, - min: M, - max: N, - limit: Option, - ) -> Result - where - K: Into, - M: TryInto, - M::Error: Into, - N: TryInto, - N::Error: Into, - { - commands::sorted_sets::zrangebylex(&self.inner, key, to!(min)?, to!(max)?, limit).await - } - - /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical - /// ordering, this command returns all the elements in the sorted set at `key` with a value between `max` and `min`. 
- /// - /// - pub async fn zrevrangebylex( - &self, - key: K, - max: M, - min: N, - limit: Option, - ) -> Result - where - K: Into, - M: TryInto, - M::Error: Into, - N: TryInto, - N::Error: Into, - { - commands::sorted_sets::zrevrangebylex(&self.inner, key, to!(max)?, to!(min)?, limit).await - } - - /// Returns all the elements in the sorted set at key with a score between `min` and `max` (including elements - /// with score equal to `min` or `max`). - /// - /// - pub async fn zrangebyscore( - &self, - key: K, - min: M, - max: N, - withscores: bool, - limit: Option, - ) -> Result - where - K: Into, - M: TryInto, - M::Error: Into, - N: TryInto, - N::Error: Into, - { - commands::sorted_sets::zrangebyscore(&self.inner, key, to!(min)?, to!(max)?, withscores, limit).await - } - - /// Returns all the elements in the sorted set at `key` with a score between `max` and `min` (including - /// elements with score equal to `max` or `min`). - /// - /// - pub async fn zrevrangebyscore( - &self, - key: K, - max: M, - min: N, - withscores: bool, - limit: Option, - ) -> Result - where - K: Into, - M: TryInto, - M::Error: Into, - N: TryInto, - N::Error: Into, - { - commands::sorted_sets::zrevrangebyscore(&self.inner, key, to!(max)?, to!(min)?, withscores, limit).await - } - - /// Returns the rank of member in the sorted set stored at `key`, with the scores ordered from low to high. - /// - /// - pub async fn zrank(&self, key: K, member: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::sorted_sets::zrank(&self.inner, key, to!(member)?) - .await? - .convert() - } - - /// Removes the specified members from the sorted set stored at `key`. Non existing members are ignored. - /// - /// - pub async fn zrem(&self, key: K, members: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::sorted_sets::zrem(&self.inner, key, to!(members)?) - .await? 
- .convert() - } - - /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical - /// ordering, this command removes all elements in the sorted set stored at `key` between the lexicographical range - /// specified by `min` and `max`. - /// - /// - pub async fn zremrangebylex(&self, key: K, min: M, max: N) -> Result - where - R: RedisResponse, - K: Into, - M: TryInto, - M::Error: Into, - N: TryInto, - N::Error: Into, - { - commands::sorted_sets::zremrangebylex(&self.inner, key, to!(min)?, to!(max)?) - .await? - .convert() - } - - /// Removes all elements in the sorted set stored at `key` with rank between `start` and `stop`. - /// - /// - pub async fn zremrangebyrank(&self, key: K, start: i64, stop: i64) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sorted_sets::zremrangebyrank(&self.inner, key, start, stop) - .await? - .convert() - } - - /// Removes all elements in the sorted set stored at `key` with a score between `min` and `max`. - /// - /// - pub async fn zremrangebyscore(&self, key: K, min: M, max: N) -> Result - where - R: RedisResponse, - K: Into, - M: TryInto, - M::Error: Into, - N: TryInto, - N::Error: Into, - { - commands::sorted_sets::zremrangebyscore(&self.inner, key, to!(min)?, to!(max)?) - .await? - .convert() - } - - /// Returns the specified range of elements in the sorted set stored at `key`. - /// - /// - pub async fn zrevrange(&self, key: K, start: i64, stop: i64, withscores: bool) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sorted_sets::zrevrange(&self.inner, key, start, stop, withscores) - .await? - .convert() - } - - /// Returns the rank of `member` in the sorted set stored at `key`, with the scores ordered from high to low. - /// - /// - pub async fn zrevrank(&self, key: K, member: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::sorted_sets::zrevrank(&self.inner, key, to!(member)?) - .await? 
- .convert() - } - - /// Returns the score of `member` in the sorted set at `key`. - /// - /// - pub async fn zscore(&self, key: K, member: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::sorted_sets::zscore(&self.inner, key, to!(member)?) - .await? - .convert() - } - - /// This command is similar to ZUNIONSTORE, but instead of storing the resulting sorted set, it is returned to the client. - /// - /// - pub async fn zunion( - &self, - keys: K, - weights: W, - aggregate: Option, - withscores: bool, - ) -> Result - where - K: Into, - W: Into, - { - commands::sorted_sets::zunion(&self.inner, keys, weights, aggregate, withscores).await - } - - /// Computes the union of the sorted sets given by the specified keys, and stores the result in `destination`. - /// - /// - pub async fn zunionstore( - &self, - dest: D, - keys: K, - weights: W, - aggregate: Option, - ) -> Result - where - R: RedisResponse, - D: Into, - K: Into, - W: Into, - { - commands::sorted_sets::zunionstore(&self.inner, dest, keys, weights, aggregate) - .await? - .convert() - } - - /// Returns the scores associated with the specified members in the sorted set stored at `key`. - /// - /// - pub async fn zmscore(&self, key: K, members: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::sorted_sets::zmscore(&self.inner, key, to!(members)?) - .await? - .convert() - } - - // ------------- LISTS ------------------ - - /// BLPOP is a blocking list pop primitive. It is the blocking version of LPOP because it blocks the connection when there are no elements to pop from - /// any of the given lists. An element is popped from the head of the first list that is non-empty, with the given keys being checked in the order that they are given. 
- /// - /// - pub async fn blpop(&self, keys: K, timeout: f64) -> Result - where - R: RedisResponse, - K: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::lists::blpop(&self.inner, keys, timeout).await?.convert() - } - - /// BRPOP is a blocking list pop primitive. It is the blocking version of RPOP because it blocks the connection when there are no elements to pop from any of the - /// given lists. An element is popped from the tail of the first list that is non-empty, with the given keys being checked in the order that they are given. - /// - /// - pub async fn brpop(&self, keys: K, timeout: f64) -> Result - where - R: RedisResponse, - K: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::lists::brpop(&self.inner, keys, timeout).await?.convert() - } - - /// The blocking equivalent of [Self::rpoplpush]. - /// - /// - pub async fn brpoplpush(&self, source: S, destination: D, timeout: f64) -> Result - where - R: RedisResponse, - S: Into, - D: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::lists::brpoplpush(&self.inner, source, destination, timeout) - .await? - .convert() - } - - /// The blocking equivalent of [Self::lmove]. - /// - /// - pub async fn blmove( - &self, - source: S, - destination: D, - source_direction: LMoveDirection, - destination_direction: LMoveDirection, - timeout: f64, - ) -> Result - where - R: RedisResponse, - S: Into, - D: Into, - { - utils::disallow_during_transaction(&self.inner)?; - - commands::lists::blmove( - &self.inner, - source, - destination, - source_direction, - destination_direction, - timeout, - ) - .await? - .convert() - } - - /// Returns the element at index index in the list stored at key. 
- /// - /// - pub async fn lindex(&self, key: K, index: i64) -> Result - where - R: RedisResponse, - K: Into, - { - commands::lists::lindex(&self.inner, key, index).await?.convert() - } - - /// Inserts element in the list stored at key either before or after the reference value `pivot`. - /// - /// - pub async fn linsert( - &self, - key: K, - location: ListLocation, - pivot: P, - element: V, - ) -> Result - where - R: RedisResponse, - K: Into, - P: TryInto, - P::Error: Into, - V: TryInto, - V::Error: Into, - { - commands::lists::linsert(&self.inner, key, location, to!(pivot)?, to!(element)?) - .await? - .convert() - } - - /// Returns the length of the list stored at key. - /// - /// - pub async fn llen(&self, key: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::lists::llen(&self.inner, key).await?.convert() - } - - /// Removes and returns the first elements of the list stored at key. - /// - /// - pub async fn lpop(&self, key: K, count: Option) -> Result - where - R: RedisResponse, - K: Into, - { - commands::lists::lpop(&self.inner, key, count).await?.convert() - } - - /// The command returns the index of matching elements inside a Redis list. - /// - /// - pub async fn lpos( - &self, - key: K, - element: V, - rank: Option, - count: Option, - maxlen: Option, - ) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::lists::lpos(&self.inner, key, to!(element)?, rank, count, maxlen) - .await? - .convert() - } - - /// Insert all the specified values at the head of the list stored at `key`. - /// - /// - pub async fn lpush(&self, key: K, elements: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::lists::lpush(&self.inner, key, to!(elements)?) - .await? - .convert() - } - - /// Inserts specified values at the head of the list stored at `key`, only if `key` already exists and holds a list. 
- /// - /// - pub async fn lpushx(&self, key: K, elements: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::lists::lpushx(&self.inner, key, to!(elements)?) - .await? - .convert() - } - - /// Returns the specified elements of the list stored at `key`. - /// - /// - pub async fn lrange(&self, key: K, start: i64, stop: i64) -> Result - where - R: RedisResponse, - K: Into, - { - commands::lists::lrange(&self.inner, key, start, stop).await?.convert() - } - - /// Removes the first `count` occurrences of elements equal to `element` from the list stored at `key`. - /// - /// - pub async fn lrem(&self, key: K, count: i64, element: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::lists::lrem(&self.inner, key, count, to!(element)?) - .await? - .convert() - } - - /// Sets the list element at `index` to `element`. - /// - /// - pub async fn lset(&self, key: K, index: i64, element: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::lists::lset(&self.inner, key, index, to!(element)?) - .await? - .convert() - } - - /// Trim an existing list so that it will contain only the specified range of elements specified. - /// - /// - pub async fn ltrim(&self, key: K, start: i64, stop: i64) -> Result - where - R: RedisResponse, - K: Into, - { - commands::lists::ltrim(&self.inner, key, start, stop).await?.convert() - } - - /// Removes and returns the last elements of the list stored at `key`. - /// - /// - pub async fn rpop(&self, key: K, count: Option) -> Result - where - R: RedisResponse, - K: Into, - { - commands::lists::rpop(&self.inner, key, count).await?.convert() - } - - /// Atomically returns and removes the last element (tail) of the list stored at `source`, and pushes the element at the first element (head) of the list stored at `destination`. 
- /// - /// - pub async fn rpoplpush(&self, source: S, dest: D) -> Result - where - R: RedisResponse, - S: Into, - D: Into, - { - commands::lists::rpoplpush(&self.inner, source, dest).await?.convert() - } - - /// Atomically returns and removes the first/last element (head/tail depending on the source direction argument) of the list stored at `source`, and pushes - /// the element at the first/last element (head/tail depending on the destination direction argument) of the list stored at `destination`. - /// - /// - pub async fn lmove( - &self, - source: S, - dest: D, - source_direction: LMoveDirection, - dest_direction: LMoveDirection, - ) -> Result - where - R: RedisResponse, - S: Into, - D: Into, - { - commands::lists::lmove(&self.inner, source, dest, source_direction, dest_direction) - .await? - .convert() - } - - /// Insert all the specified values at the tail of the list stored at `key`. - /// - /// - pub async fn rpush(&self, key: K, elements: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::lists::rpush(&self.inner, key, to!(elements)?) - .await? - .convert() - } - - /// Inserts specified values at the tail of the list stored at `key`, only if key already exists and holds a list. - /// - /// - pub async fn rpushx(&self, key: K, elements: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::lists::rpushx(&self.inner, key, to!(elements)?) - .await? - .convert() - } - - // ------------- GEO -------------------- - - /// Adds the specified geospatial items (longitude, latitude, name) to the specified key. - /// - /// - pub async fn geoadd( - &self, - key: K, - options: Option, - changed: bool, - values: V, - ) -> Result - where - R: RedisResponse, - K: Into, - V: Into, - { - commands::geo::geoadd(&self.inner, key, options, changed, values) - .await? 
- .convert() - } - - /// Return valid Geohash strings representing the position of one or more elements in a sorted set value representing a geospatial index (where elements were added using GEOADD). - /// - /// - pub async fn geohash(&self, key: K, members: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::geo::geohash(&self.inner, key, to!(members)?).await?.convert() - } - - /// Return the positions (longitude,latitude) of all the specified members of the geospatial index represented by the sorted set at key. - /// - /// Callers can use [as_geo_position](crate::types::RedisValue::as_geo_position) to lazily parse results as needed. - /// - /// - pub async fn geopos(&self, key: K, members: V) -> Result - where - K: Into, - V: TryInto, - V::Error: Into, - { - commands::geo::geopos(&self.inner, key, to!(members)?).await - } - - /// Return the distance between two members in the geospatial index represented by the sorted set. - /// - /// - pub async fn geodist(&self, key: K, src: S, dest: D, unit: Option) -> Result - where - R: RedisResponse, - K: Into, - S: TryInto, - S::Error: Into, - D: TryInto, - D::Error: Into, - { - commands::geo::geodist(&self.inner, key, to!(src)?, to!(dest)?, unit) - .await? - .convert() - } - - /// Return the members of a sorted set populated with geospatial information using GEOADD, which are within the borders of the area specified with - /// the center location and the maximum distance from the center (the radius). 
- /// - /// - pub async fn georadius( - &self, - key: K, - position: P, - radius: f64, - unit: GeoUnit, - withcoord: bool, - withdist: bool, - withhash: bool, - count: Option<(u64, Any)>, - ord: Option, - store: Option, - storedist: Option, - ) -> Result, RedisError> - where - K: Into, - P: Into, - { - commands::geo::georadius( - &self.inner, - key, - position, - radius, - unit, - withcoord, - withdist, - withhash, - count, - ord, - store, - storedist, - ) - .await - } - - /// This command is exactly like GEORADIUS with the sole difference that instead of taking, as the center of the area to query, a longitude and - /// latitude value, it takes the name of a member already existing inside the geospatial index represented by the sorted set. - /// - /// - pub async fn georadiusbymember( - &self, - key: K, - member: V, - radius: f64, - unit: GeoUnit, - withcoord: bool, - withdist: bool, - withhash: bool, - count: Option<(u64, Any)>, - ord: Option, - store: Option, - storedist: Option, - ) -> Result, RedisError> - where - K: Into, - V: TryInto, - V::Error: Into, - { - commands::geo::georadiusbymember( - &self.inner, - key, - to!(member)?, - radius, - unit, - withcoord, - withdist, - withhash, - count, - ord, - store, - storedist, - ) - .await - } - - /// Return the members of a sorted set populated with geospatial information using GEOADD, which are within the borders of the area specified by a given shape. 
- /// - /// - pub async fn geosearch( - &self, - key: K, - from_member: Option, - from_lonlat: Option, - by_radius: Option<(f64, GeoUnit)>, - by_box: Option<(f64, f64, GeoUnit)>, - ord: Option, - count: Option<(u64, Any)>, - withcoord: bool, - withdist: bool, - withhash: bool, - ) -> Result, RedisError> - where - K: Into, - { - commands::geo::geosearch( - &self.inner, - key, - from_member, - from_lonlat, - by_radius, - by_box, - ord, - count, - withcoord, - withdist, - withhash, - ) - .await - } - - /// This command is like GEOSEARCH, but stores the result in destination key. Returns the number of members added to the destination key. - /// - /// - pub async fn geosearchstore( - &self, - dest: D, - source: S, - from_member: Option, - from_lonlat: Option, - by_radius: Option<(f64, GeoUnit)>, - by_box: Option<(f64, f64, GeoUnit)>, - ord: Option, - count: Option<(u64, Any)>, - storedist: bool, - ) -> Result - where - R: RedisResponse, - D: Into, - S: Into, - { - commands::geo::geosearchstore( - &self.inner, - dest, - source, - from_member, - from_lonlat, - by_radius, - by_box, - ord, - count, - storedist, - ) - .await? - .convert() - } - - // ------------ HYPERLOGLOG -------------- - - /// Adds all the element arguments to the HyperLogLog data structure stored at the variable name specified as first argument. - /// - /// - pub async fn pfadd(&self, key: K, elements: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::hyperloglog::pfadd(&self.inner, key, to!(elements)?) - .await? - .convert() - } - - /// When called with a single key, returns the approximated cardinality computed by the HyperLogLog data structure stored at - /// the specified variable, which is 0 if the variable does not exist. - /// - /// When called with multiple keys, returns the approximated cardinality of the union of the HyperLogLogs passed, by - /// internally merging the HyperLogLogs stored at the provided keys into a temporary HyperLogLog. 
- /// - /// - pub async fn pfcount(&self, keys: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::hyperloglog::pfcount(&self.inner, keys).await?.convert() - } - - /// Merge multiple HyperLogLog values into an unique value that will approximate the cardinality of the union of the observed - /// sets of the source HyperLogLog structures. - /// - /// - pub async fn pfmerge(&self, dest: D, sources: S) -> Result - where - R: RedisResponse, - D: Into, - S: Into, - { - commands::hyperloglog::pfmerge(&self.inner, dest, sources) - .await? - .convert() - } - - // -------------- LUA ------------------ - - /// Load a script into the scripts cache, without executing it. After the specified command is loaded into the script cache it will be callable using EVALSHA with the correct SHA1 digest of the script. - /// - /// - pub async fn script_load(&self, script: S) -> Result - where - S: Into, - { - commands::lua::script_load(&self.inner, script).await?.convert() - } - - /// A clustered variant of [script_load](Self::script_load) that loads the script on all primary nodes in a cluster. - pub async fn script_load_cluster(&self, script: S) -> Result - where - S: Into, - { - commands::lua::script_load_cluster(&self.inner, script).await?.convert() - } - - /// Kills the currently executing Lua script, assuming no write operation was yet performed by the script. - /// - /// - pub async fn script_kill(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::lua::script_kill(&self.inner).await - } - - /// A clustered variant of the [script_kill](Self::script_kill) command that issues the command to all primary nodes in the cluster. - pub async fn script_kill_cluster(&self) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::lua::script_kill_cluster(&self.inner).await - } - - /// Flush the Lua scripts cache. 
- /// - /// - pub async fn script_flush(&self, r#async: bool) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::lua::script_flush(&self.inner, r#async).await - } - - /// A clustered variant of [script_flush](Self::script_flush) that flushes the script cache on all primary nodes in the cluster. - pub async fn script_flush_cluster(&self, r#async: bool) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::lua::script_flush_cluster(&self.inner, r#async).await - } - - /// Returns information about the existence of the scripts in the script cache. - /// - /// - pub async fn script_exists(&self, hashes: H) -> Result, RedisError> - where - H: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::lua::script_exists(&self.inner, hashes).await - } - - /// Set the debug mode for subsequent scripts executed with EVAL. - /// - /// - pub async fn script_debug(&self, flag: ScriptDebugFlag) -> Result<(), RedisError> { - utils::disallow_during_transaction(&self.inner)?; - commands::lua::script_debug(&self.inner, flag).await - } - - /// Evaluates a script cached on the server side by its SHA1 digest. - /// - /// - pub async fn evalsha(&self, hash: S, keys: K, args: V) -> Result - where - R: RedisResponse, - S: Into, - K: Into, - V: TryInto, - V::Error: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::lua::evalsha(&self.inner, hash, keys, to!(args)?) - .await? - .convert() - } - - /// Evaluate a Lua script on the server. - /// - /// - pub async fn eval(&self, script: S, keys: K, args: V) -> Result - where - R: RedisResponse, - S: Into, - K: Into, - V: TryInto, - V::Error: Into, - { - utils::disallow_during_transaction(&self.inner)?; - commands::lua::eval(&self.inner, script, keys, to!(args)?) - .await? 
- .convert() - } - - // --------------- SCANNING --------------- - - /// Incrementally iterate over a set of keys matching the `pattern` argument, returning `count` results per page, if specified. - /// - /// The scan operation can be canceled by dropping the returned stream. - /// - /// Note: scanning data in a cluster can be tricky. To make this easier this function supports [hash tags](https://redis.io/topics/cluster-spec#keys-hash-tags) in the - /// `pattern` so callers can direct scanning operations to specific nodes in the cluster. Callers can also use [split_cluster](Self::split_cluster) with this function if - /// hash tags are not used in the keys that should be scanned. - /// - /// - pub fn scan

( - &self, - pattern: P, - count: Option, - r#type: Option, - ) -> impl Stream> - where - P: Into, - { - commands::scan::scan(&self.inner, pattern, count, r#type) - } - - /// Incrementally iterate over pages of the hash map stored at `key`, returning `count` results per page, if specified. - /// - /// - pub fn hscan( - &self, - key: K, - pattern: P, - count: Option, - ) -> impl Stream> - where - K: Into, - P: Into, - { - commands::scan::hscan(&self.inner, key, pattern, count) - } - - /// Incrementally iterate over pages of the set stored at `key`, returning `count` results per page, if specified. - /// - /// - pub fn sscan( - &self, - key: K, - pattern: P, - count: Option, - ) -> impl Stream> - where - K: Into, - P: Into, - { - commands::scan::sscan(&self.inner, key, pattern, count) - } - - /// Incrementally iterate over pages of the sorted set stored at `key`, returning `count` results per page, if specified. - /// - /// - pub fn zscan( - &self, - key: K, - pattern: P, - count: Option, - ) -> impl Stream> - where - K: Into, - P: Into, - { - commands::scan::zscan(&self.inner, key, pattern, count) - } - - // --------------- STREAMS ---------------- -} - -#[cfg(test)] -mod tests { - - #[cfg(feature = "sha1-support")] - #[test] - fn should_correctly_sha1_hash() { - assert_eq!( - &util::sha1_hash("foobarbaz"), - "5f5513f8822fdbe5145af33b64d8d970dcf95c6e" - ); - assert_eq!(&util::sha1_hash("abc123"), "6367c48dd193d56ea7b0baad25b19455e529f5ee"); - assert_eq!( - &util::sha1_hash("jakdjfkldajfklej8a4tjkaldsnvkl43kjakljdvk42"), - "45c118f5de7c3fd3a4022135dc6acfb526f3c225" - ); - } -} diff --git a/src/clients/mod.rs b/src/clients/mod.rs new file mode 100644 index 00000000..edf26faa --- /dev/null +++ b/src/clients/mod.rs @@ -0,0 +1,16 @@ +mod redis; +mod transaction; +pub use redis::RedisClient; +pub use transaction::TransactionClient; + +#[cfg(feature = "sentinel-client")] +mod sentinel; +#[cfg(feature = "sentinel-client")] +#[cfg_attr(docsrs, doc(cfg(feature = 
"sentinel-client")))] +pub use sentinel::{SentinelClient, SentinelConfig}; + +#[cfg(feature = "subscriber-client")] +mod pubsub; +#[cfg(feature = "subscriber-client")] +#[cfg_attr(docsrs, doc(cfg(feature = "subscriber-client")))] +pub use pubsub::SubscriberClient; diff --git a/src/clients/pubsub.rs b/src/clients/pubsub.rs new file mode 100644 index 00000000..db0c37b4 --- /dev/null +++ b/src/clients/pubsub.rs @@ -0,0 +1,306 @@ +use crate::clients::RedisClient; +use crate::commands; +use crate::error::RedisError; +use crate::interfaces::{ + async_spawn, wrap_async, AsyncResult, AuthInterface, ClientLike, MetricsInterface, PubsubInterface, +}; +use crate::modules::inner::RedisClientInner; +use crate::types::{MultipleStrings, RedisConfig}; +use crate::utils; +use bytes_utils::Str; +use futures::future::join_all; +use futures::Stream; +use parking_lot::RwLock; +use std::collections::BTreeSet; +use std::fmt; +use std::fmt::Formatter; +use std::mem; +use std::sync::Arc; +use tokio::sync::mpsc::unbounded_channel; +use tokio::task::JoinHandle; +use tokio_stream::wrappers::UnboundedReceiverStream; +use tokio_stream::StreamExt; + +type ChannelSet = Arc>>; + +fn from_redis_client(client: RedisClient, channels: &ChannelSet, patterns: &ChannelSet) -> SubscriberClient { + SubscriberClient { + inner: client.inner, + patterns: patterns.clone(), + channels: channels.clone(), + } +} + +fn result_of_vec(vec: Vec>) -> Result<(), RedisError> { + vec.into_iter().collect() +} + +fn add_to_channels(channels: &ChannelSet, channel: Str) { + channels.write().insert(channel); +} + +fn remove_from_channels(channels: &ChannelSet, channel: &str) { + channels.write().remove(channel); +} + +#[derive(Clone, Debug, Eq, PartialEq)] +enum ReconnectOperation { + Subscribe, + PSubscribe, + Unsubscribe, + PUnsubscribe, +} + +fn concurrent_op( + client: &SubscriberClient, + channels: BTreeSet, + operation: ReconnectOperation, +) -> Vec> { + let client = client.clone(); + + channels + .into_iter() + 
.map(move |val| { + let (operation, client) = (operation.clone(), client.clone()); + wrap_async(|| async move { + match operation { + ReconnectOperation::Subscribe => client.subscribe(val).await.map(|_| ()), + ReconnectOperation::PSubscribe => client.psubscribe(val).await.map(|_| ()), + ReconnectOperation::Unsubscribe => client.unsubscribe(val).await.map(|_| ()), + ReconnectOperation::PUnsubscribe => client.punsubscribe(val).await.map(|_| ()), + } + }) + }) + .collect() +} + +/// A subscriber client that will manage subscription state to any pubsub channels or patterns for the caller. +/// +/// If the connection to the server closes for any reason this struct can automatically re-subscribe, etc. +/// +/// ```rust no_run +/// use fred::clients::SubscriberClient; +/// use fred::prelude::*; +/// use futures::stream::StreamExt; +/// +/// let subscriber = SubscriberClient::new(RedisConfig::default()); +/// let _ = subscriber.connect(Some(ReconnectPolicy::default())); +/// let _ = subscriber.wait_for_connect().await?; +/// // spawn a task that will automatically re-subscribe to channels and patterns as needed +/// let _ = subscriber.manage_subscriptions(); +/// +/// // do pubsub things +/// let jh = tokio::spawn(subscriber.on_message().for_each_concurrent(10, |(channel, message)| { +/// println!("Recv message {:?} on channel {}", message, channel); +/// Ok(()) +/// })); +/// +/// let _ = subscriber.subscribe("foo").await?; +/// let _ = subscriber.psubscribe("bar*").await?; +/// // if the subscriber connection closes now for any reason the client will automatically re-subscribe to "foo" and "bar*" +/// +/// // some convenience functions exist as well +/// println!("Tracking channels: {:?}", subscriber.tracked_channels()); +/// println!("Tracking patterns: {:?}", subscriber.tracked_patterns()); +/// +/// // or force a re-subscription at any time +/// let _ = subscriber.resubscribe_all().await?; +/// // or clear all the local state and unsubscribe +/// let _ = 
subscriber.unsubscribe_all().await?; +/// +/// // basic commands (AUTH, QUIT, INFO, PING, etc) work the same as the `RedisClient` +/// // additionally, tracing and metrics are supported in the same way as the `RedisClient` +/// let _ = subscriber.quit().await?; +/// let _ = jh.await; +/// ``` +#[derive(Clone)] +#[cfg_attr(docsrs, doc(cfg(feature = "subscriber-client")))] +pub struct SubscriberClient { + channels: ChannelSet, + patterns: ChannelSet, + inner: Arc, +} + +impl fmt::Debug for SubscriberClient { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("SubscriberClient") + .field("id", &self.inner.id) + .field("channels", &self.tracked_channels()) + .field("patterns", &self.tracked_patterns()) + .finish() + } +} + +impl ClientLike for SubscriberClient { + #[doc(hidden)] + fn inner(&self) -> &Arc { + &self.inner + } +} + +impl AuthInterface for SubscriberClient {} +impl MetricsInterface for SubscriberClient {} + +impl PubsubInterface for SubscriberClient { + fn subscribe(&self, channel: S) -> AsyncResult + where + S: Into, + { + into!(channel); + let cached_channels = self.channels.clone(); + async_spawn(self, |inner| async move { + let result = commands::pubsub::subscribe(&inner, channel.clone()).await; + if result.is_ok() { + add_to_channels(&cached_channels, channel); + } + result + }) + } + + fn psubscribe(&self, patterns: S) -> AsyncResult> + where + S: Into, + { + into!(patterns); + let cached_patterns = self.patterns.clone(); + async_spawn(self, |inner| async move { + let result = commands::pubsub::psubscribe(&inner, patterns.clone()).await; + if result.is_ok() { + for pattern in patterns.inner().into_iter() { + if let Some(pattern) = pattern.as_bytes_str() { + add_to_channels(&cached_patterns, pattern) + } + } + } + result + }) + } + + fn unsubscribe(&self, channel: S) -> AsyncResult + where + S: Into, + { + into!(channel); + let cached_channels = self.channels.clone(); + async_spawn(self, |inner| async move { + let result = 
commands::pubsub::unsubscribe(&inner, channel.clone()).await; + if result.is_ok() { + remove_from_channels(&cached_channels, &channel); + } + result + }) + } + + fn punsubscribe(&self, patterns: S) -> AsyncResult> + where + S: Into, + { + into!(patterns); + let cached_patterns = self.patterns.clone(); + async_spawn(self, |inner| async move { + let result = commands::pubsub::punsubscribe(&inner, patterns.clone()).await; + if result.is_ok() { + for pattern in patterns.inner().into_iter() { + if let Some(pattern) = pattern.as_bytes_str() { + remove_from_channels(&cached_patterns, &pattern) + } + } + } + result + }) + } +} + +impl SubscriberClient { + /// Create a new client instance without connecting to the server. + pub fn new(config: RedisConfig) -> SubscriberClient { + SubscriberClient { + channels: Arc::new(RwLock::new(BTreeSet::new())), + patterns: Arc::new(RwLock::new(BTreeSet::new())), + inner: RedisClientInner::new(config), + } + } + + /// Create a new `SubscriberClient` from the config provided to this client. + /// + /// The returned client will not be connected to the server, and it will use new connections after connecting. However, it will manage the same channel subscriptions as the original client. + pub fn clone_new(&self) -> Self { + let inner = RedisClientInner::new(utils::read_locked(&self.inner.config)); + + SubscriberClient { + inner, + channels: Arc::new(RwLock::new(self.channels.read().clone())), + patterns: Arc::new(RwLock::new(self.patterns.read().clone())), + } + } + + /// Listen for reconnection notifications. + /// + /// This function can be used to receive notifications whenever the client successfully reconnects in order to select the right database again, re-subscribe to channels, etc. + /// + /// A reconnection event is also triggered upon first connecting to the server. 
+ pub fn on_reconnect(&self) -> impl Stream { + let (tx, rx) = unbounded_channel(); + self.inner().reconnect_tx.write().push_back(tx); + + let channels = self.channels.clone(); + let patterns = self.patterns.clone(); + UnboundedReceiverStream::new(rx).map(move |client| from_redis_client(client, &channels, &patterns)) + } + + /// Spawn a task that will automatically re-subscribe to any channels or channel patterns used by the client. + pub fn manage_subscriptions(&self) -> JoinHandle<()> { + let _self = self.clone(); + tokio::spawn(async move { + let mut stream = _self.on_reconnect(); + + while let Some(client) = stream.next().await { + if let Err(error) = client.resubscribe_all().await { + error!( + "{}: Failed to resubscribe to channels or patterns: {:?}", + client.id(), + error + ); + } + } + }) + } + + /// Read the set of channels that this client will manage. + pub fn tracked_channels(&self) -> BTreeSet { + self.channels.read().clone() + } + + /// Read the set of channel patterns that this client will manage. + pub fn tracked_patterns(&self) -> BTreeSet { + self.patterns.read().clone() + } + + /// Re-subscribe to any tracked channels and patterns concurrently. + /// + /// This can be used to sync the client's subscriptions with the server after calling `QUIT`, then `connect`, etc. + pub async fn resubscribe_all(&self) -> Result<(), RedisError> { + let channels = self.tracked_channels(); + let patterns = self.tracked_patterns(); + + let mut channel_tasks = concurrent_op(self, channels, ReconnectOperation::Subscribe); + let pattern_tasks = concurrent_op(self, patterns, ReconnectOperation::PSubscribe); + channel_tasks.extend(pattern_tasks); + + result_of_vec(join_all(channel_tasks).await)?; + Ok(()) + } + + /// Unsubscribe from all tracked channels and patterns, and remove them from the client cache. 
+ pub async fn unsubscribe_all(&self) -> Result<(), RedisError> { + let channels = mem::replace(&mut *self.channels.write(), BTreeSet::new()); + let patterns = mem::replace(&mut *self.patterns.write(), BTreeSet::new()); + + let mut channel_tasks = concurrent_op(self, channels, ReconnectOperation::Unsubscribe); + let pattern_tasks = concurrent_op(self, patterns, ReconnectOperation::PUnsubscribe); + channel_tasks.extend(pattern_tasks); + + result_of_vec(join_all(channel_tasks).await)?; + Ok(()) + } +} diff --git a/src/clients/redis.rs b/src/clients/redis.rs new file mode 100644 index 00000000..02f44fb3 --- /dev/null +++ b/src/clients/redis.rs @@ -0,0 +1,238 @@ +use crate::commands; +use crate::error::{RedisError, RedisErrorKind}; +use crate::interfaces::{ + AclInterface, AuthInterface, ClientInterface, ClusterInterface, ConfigInterface, GeoInterface, HashesInterface, + HeartbeatInterface, HyperloglogInterface, KeysInterface, ListInterface, LuaInterface, MemoryInterface, + MetricsInterface, PubsubInterface, ServerInterface, SetsInterface, SlowlogInterface, SortedSetsInterface, + TransactionInterface, +}; +use crate::modules::inner::RedisClientInner; +use crate::prelude::{ClientLike, StreamsInterface}; +use crate::types::*; +use crate::utils; +use bytes_utils::Str; +use futures::Stream; +use std::fmt; +use std::sync::Arc; +use tokio::sync::mpsc::unbounded_channel; +use tokio_stream::wrappers::UnboundedReceiverStream; + +/// The primary Redis client struct. 
+#[derive(Clone)] +pub struct RedisClient { + pub(crate) inner: Arc, +} + +impl fmt::Display for RedisClient { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RedisClient") + .field("id", &self.inner.id) + .field("state", &self.state()) + .finish() + } +} + +#[doc(hidden)] +impl<'a> From<&'a Arc> for RedisClient { + fn from(inner: &'a Arc) -> RedisClient { + RedisClient { inner: inner.clone() } + } +} + +impl ClientLike for RedisClient { + #[doc(hidden)] + fn inner(&self) -> &Arc { + &self.inner + } +} + +impl AclInterface for RedisClient {} +impl ClientInterface for RedisClient {} +impl ClusterInterface for RedisClient {} +impl PubsubInterface for RedisClient {} +impl ConfigInterface for RedisClient {} +impl GeoInterface for RedisClient {} +impl HashesInterface for RedisClient {} +impl HyperloglogInterface for RedisClient {} +impl MetricsInterface for RedisClient {} +impl TransactionInterface for RedisClient {} +impl KeysInterface for RedisClient {} +impl LuaInterface for RedisClient {} +impl ListInterface for RedisClient {} +impl MemoryInterface for RedisClient {} +impl AuthInterface for RedisClient {} +impl ServerInterface for RedisClient {} +impl SlowlogInterface for RedisClient {} +impl SetsInterface for RedisClient {} +impl SortedSetsInterface for RedisClient {} +impl HeartbeatInterface for RedisClient {} +impl StreamsInterface for RedisClient {} + +impl RedisClient { + /// Create a new client instance without connecting to the server. + pub fn new(config: RedisConfig) -> RedisClient { + RedisClient { + inner: RedisClientInner::new(config), + } + } + + /// Create a new `RedisClient` from the config provided to this client. + /// + /// The returned client will not be connected to the server, and it will use new connections after connecting. + pub fn clone_new(&self) -> Self { + RedisClient::new(utils::read_locked(&self.inner.config)) + } + + /// Listen for reconnection notifications. 
+ /// + /// This function can be used to receive notifications whenever the client successfully reconnects in order to select the right database again, re-subscribe to channels, etc. + /// + /// A reconnection event is also triggered upon first connecting to the server. + pub fn on_reconnect(&self) -> impl Stream { + let (tx, rx) = unbounded_channel(); + self.inner().reconnect_tx.write().push_back(tx); + + UnboundedReceiverStream::new(rx) + } + + /// Listen for notifications whenever the cluster state changes. + /// + /// This is usually triggered in response to a `MOVED` or `ASK` error, but can also happen when connections close unexpectedly. + pub fn on_cluster_change(&self) -> impl Stream> { + let (tx, rx) = unbounded_channel(); + self.inner().cluster_change_tx.write().push_back(tx); + + UnboundedReceiverStream::new(rx) + } + + /// Split a clustered Redis client into a list of centralized clients - one for each primary node in the cluster. + /// + /// Some Redis commands are not designed to work with hash slots against a clustered deployment. For example, + /// `FLUSHDB`, `PING`, etc all work on one node in the cluster, but no interface exists for the client to + /// select a specific node in the cluster against which to run the command. This function allows the caller to + /// create a list of clients such that each connect to one of the primary nodes in the cluster and functions + /// as if it were operating against a single centralized Redis server. + /// + /// **The clients returned by this function will not be connected to their associated servers. The caller needs to + /// call `connect` on each client before sending any commands.** + /// + /// Note: For this to work reliably this function needs to be called each time nodes are added or removed from the cluster. 
+ pub async fn split_cluster(&self) -> Result, RedisError> { + if utils::is_clustered(&self.inner.config) { + commands::server::split(&self.inner).await + } else { + Err(RedisError::new( + RedisErrorKind::Unknown, + "Client is not using a clustered deployment.", + )) + } + } + + // --------------- SCANNING --------------- + // if/when `impl Trait` works inside traits we can move this to a trait + + /// Incrementally iterate over a set of keys matching the `pattern` argument, returning `count` results per page, if specified. + /// + /// The scan operation can be canceled by dropping the returned stream. + /// + /// Note: scanning data in a cluster can be tricky. To make this easier this function supports [hash tags](https://redis.io/topics/cluster-spec#keys-hash-tags) in the + /// `pattern` so callers can direct scanning operations to specific nodes in the cluster. Callers can also use [split_cluster](Self::split_cluster) with this function if + /// hash tags are not used in the keys that should be scanned. + /// + /// + pub fn scan

( + &self, + pattern: P, + count: Option, + r#type: Option, + ) -> impl Stream> + where + P: Into, + { + commands::scan::scan(&self.inner, pattern.into(), count, r#type) + } + + /// Run the `SCAN` command on each primary/main node in a cluster concurrently. + /// + /// In order for this function to work reliably the cluster state must not change while scanning. If nodes are added or removed, or hash slots are rebalanced, it may result + /// in missing keys or duplicate keys in the result stream. If callers need to support cluster scanning while the cluster state may change please see [split_cluster](Self::split_cluster). + /// + /// Unlike `SCAN`, `HSCAN`, etc, the returned stream may continue even if [has_more](crate::types::ScanResult::has_more) returns false on a given page of keys. + pub fn scan_cluster

( + &self, + pattern: P, + count: Option, + r#type: Option, + ) -> impl Stream> + where + P: Into, + { + commands::scan::scan_cluster(&self.inner, pattern.into(), count, r#type) + } + + /// Incrementally iterate over pages of the hash map stored at `key`, returning `count` results per page, if specified. + /// + /// + pub fn hscan( + &self, + key: K, + pattern: P, + count: Option, + ) -> impl Stream> + where + K: Into, + P: Into, + { + commands::scan::hscan(&self.inner, key, pattern.into(), count) + } + + /// Incrementally iterate over pages of the set stored at `key`, returning `count` results per page, if specified. + /// + /// + pub fn sscan( + &self, + key: K, + pattern: P, + count: Option, + ) -> impl Stream> + where + K: Into, + P: Into, + { + commands::scan::sscan(&self.inner, key, pattern.into(), count) + } + + /// Incrementally iterate over pages of the sorted set stored at `key`, returning `count` results per page, if specified. + /// + /// + pub fn zscan( + &self, + key: K, + pattern: P, + count: Option, + ) -> impl Stream> + where + K: Into, + P: Into, + { + commands::scan::zscan(&self.inner, key, pattern.into(), count) + } +} + +#[cfg(test)] +mod tests { + use crate::util; + + #[test] + fn should_correctly_sha1_hash() { + assert_eq!( + &util::sha1_hash("foobarbaz"), + "5f5513f8822fdbe5145af33b64d8d970dcf95c6e" + ); + assert_eq!(&util::sha1_hash("abc123"), "6367c48dd193d56ea7b0baad25b19455e529f5ee"); + assert_eq!( + &util::sha1_hash("jakdjfkldajfklej8a4tjkaldsnvkl43kjakljdvk42"), + "45c118f5de7c3fd3a4022135dc6acfb526f3c225" + ); + } +} diff --git a/src/clients/sentinel.rs b/src/clients/sentinel.rs new file mode 100644 index 00000000..e228ba73 --- /dev/null +++ b/src/clients/sentinel.rs @@ -0,0 +1,157 @@ +use crate::clients::redis::RedisClient; +use crate::interfaces::*; +use crate::modules::inner::RedisClientInner; +use crate::protocol::tls::TlsConfig; +use crate::types::{Blocking, PerformanceConfig, RedisConfig, ServerConfig}; +use futures::{Stream, 
StreamExt}; +use redis_protocol::resp3::prelude::RespVersion; +use std::default::Default; +use std::fmt; +use std::sync::Arc; +use tokio::sync::mpsc::unbounded_channel; +use tokio_stream::wrappers::UnboundedReceiverStream; + +/// Configuration options for sentinel clients. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(docsrs, doc(cfg(feature = "sentinel-client")))] +pub struct SentinelConfig { + /// The hostname for the sentinel node. + /// + /// Default: `127.0.0.1` + pub host: String, + /// The port on which the sentinel node is listening. + /// + /// Default: `26379` + pub port: u16, + /// An optional ACL username for the client to use when authenticating. If ACL rules are not configured this should be `None`. + /// + /// Default: `None` + pub username: Option, + /// An optional password for the client to use when authenticating. + /// + /// Default: `None` + pub password: Option, + /// TLS configuration fields. If `None` the connection will not use TLS. + /// + /// Default: `None` + #[cfg(feature = "enable-tls")] + #[cfg_attr(docsrs, doc(cfg(feature = "enable-tls")))] + pub tls: Option, + /// Whether or not to enable tracing for this client. 
+ /// + /// Default: `false` + #[cfg(feature = "partial-tracing")] + #[cfg_attr(docsrs, doc(cfg(feature = "partial-tracing")))] + pub tracing: bool, +} + +impl Default for SentinelConfig { + fn default() -> Self { + SentinelConfig { + host: "127.0.0.1".into(), + port: 26379, + username: None, + password: None, + #[cfg(feature = "enable-tls")] + tls: None, + #[cfg(feature = "partial-tracing")] + tracing: false, + } + } +} + +#[doc(hidden)] +impl From for RedisConfig { + fn from(config: SentinelConfig) -> Self { + RedisConfig { + server: ServerConfig::Centralized { + host: config.host, + port: config.port, + }, + fail_fast: true, + performance: PerformanceConfig { + pipeline: false, + ..Default::default() + }, + database: None, + blocking: Blocking::Block, + username: config.username, + password: config.password, + version: RespVersion::RESP2, + #[cfg(feature = "enable-tls")] + tls: config.tls, + #[cfg(feature = "partial-tracing")] + tracing: config.tracing, + } + } +} + +/// A struct for interacting directly with Sentinel nodes. +/// +/// This struct **will not** communicate with Redis servers behind the sentinel interface, but rather with the sentinel nodes themselves. Callers should use the [RedisClient](crate::clients::RedisClient) interface with a [ServerConfig::Sentinel](crate::types::ServerConfig::Sentinel) for interacting with Redis services behind a sentinel layer. +/// +/// See the [sentinel API docs](https://redis.io/topics/sentinel#sentinel-api) for more information. 
+#[derive(Clone)] +#[cfg_attr(docsrs, doc(cfg(feature = "sentinel-client")))] +pub struct SentinelClient { + inner: Arc, +} + +impl ClientLike for SentinelClient { + #[doc(hidden)] + fn inner(&self) -> &Arc { + &self.inner + } +} + +impl fmt::Display for SentinelClient { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SentinelClient") + .field("id", &self.inner.id) + .field("state", &self.state()) + .finish() + } +} + +#[doc(hidden)] +impl<'a> From<&'a Arc> for SentinelClient { + fn from(inner: &'a Arc) -> Self { + SentinelClient { inner: inner.clone() } + } +} + +#[doc(hidden)] +impl From for SentinelClient { + fn from(client: RedisClient) -> Self { + SentinelClient { inner: client.inner } + } +} + +impl SentinelInterface for SentinelClient {} +impl MetricsInterface for SentinelClient {} +impl AclInterface for SentinelClient {} +impl PubsubInterface for SentinelClient {} +impl ClientInterface for SentinelClient {} +impl AuthInterface for SentinelClient {} +impl HeartbeatInterface for SentinelClient {} + +impl SentinelClient { + /// Create a new client instance without connecting to the sentinel node. + pub fn new(config: SentinelConfig) -> SentinelClient { + SentinelClient { + inner: RedisClientInner::new(config.into()), + } + } + + /// Listen for reconnection notifications. + /// + /// This function can be used to receive notifications whenever the client successfully reconnects. + /// + /// A reconnection event is also triggered upon first connecting to the server. 
+ pub fn on_reconnect(&self) -> impl Stream { + let (tx, rx) = unbounded_channel(); + self.inner.reconnect_tx.write().push_back(tx); + + UnboundedReceiverStream::new(rx).map(|client| client.into()) + } +} diff --git a/src/clients/transaction.rs b/src/clients/transaction.rs new file mode 100644 index 00000000..f6f30bc5 --- /dev/null +++ b/src/clients/transaction.rs @@ -0,0 +1,112 @@ +use crate::error::{RedisError, RedisErrorKind}; +use crate::interfaces::*; +use crate::modules::inner::RedisClientInner; +use crate::types::FromRedis; +use crate::utils::check_and_set_bool; +use crate::{commands, utils}; +use parking_lot::RwLock; +use std::fmt; +use std::sync::Arc; + +/// A client struct for commands in a MULTI/EXEC transaction block. +/// +/// This struct will use the same connection(s) as the client from which it was created. +pub struct TransactionClient { + inner: Arc, + finished: Arc>, +} + +impl fmt::Debug for TransactionClient { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TransactionClient") + .field("id", &self.inner.id) + .field("state", &self.inner.state.read()) + .finish() + } +} + +impl Drop for TransactionClient { + fn drop(&mut self) { + if !*self.finished.read() { + warn!( + "{}: Dropping transaction client without finishing transaction!", + self.inner.client_name() + ); + } + } +} + +impl ClientLike for TransactionClient { + #[doc(hidden)] + fn inner(&self) -> &Arc { + &self.inner + } +} + +impl AclInterface for TransactionClient {} +impl ClientInterface for TransactionClient {} +impl PubsubInterface for TransactionClient {} +impl ConfigInterface for TransactionClient {} +impl GeoInterface for TransactionClient {} +impl HashesInterface for TransactionClient {} +impl HyperloglogInterface for TransactionClient {} +impl MetricsInterface for TransactionClient {} +impl TransactionInterface for TransactionClient {} +impl KeysInterface for TransactionClient {} +impl ListInterface for TransactionClient {} +impl MemoryInterface 
for TransactionClient {} +impl AuthInterface for TransactionClient {} +impl ServerInterface for TransactionClient {} +impl SetsInterface for TransactionClient {} +impl SortedSetsInterface for TransactionClient {} + +impl TransactionClient { + /// Executes all previously queued commands in a transaction and restores the connection state to normal. + /// + /// + /// + /// Note: Automatic request retry policies in the event of a connection closing can present problems for transactions. + /// If the underlying connection closes while a transaction is in process the client will abort the transaction by + /// returning a `Canceled` error to the caller of any pending intermediate command, as well as this one. It's up to + /// the caller to retry transactions as needed. + pub async fn exec(self) -> Result + where + R: FromRedis, + { + if check_and_set_bool(&self.finished, true) { + return Err(RedisError::new( + RedisErrorKind::InvalidCommand, + "Transaction already finished.", + )); + } + commands::server::exec(&self.inner).await?.convert() + } + + /// Flushes all previously queued commands in a transaction and restores the connection state to normal. + /// + /// + pub async fn discard(self) -> Result<(), RedisError> { + if check_and_set_bool(&self.finished, true) { + return Err(RedisError::new( + RedisErrorKind::InvalidCommand, + "Transaction already finished.", + )); + } + commands::server::discard(&self.inner).await + } + + /// Read the hash slot against which this transaction will run, if known. 
+ pub fn hash_slot(&self) -> Option { + utils::read_locked(&self.inner.multi_block).and_then(|b| b.hash_slot) + } +} + +#[doc(hidden)] +impl<'a> From<&'a Arc> for TransactionClient { + fn from(inner: &'a Arc) -> Self { + TransactionClient { + inner: inner.clone(), + finished: Arc::new(RwLock::new(false)), + } + } +} diff --git a/src/commands/acl.rs b/src/commands/impls/acl.rs similarity index 71% rename from src/commands/acl.rs rename to src/commands/impls/acl.rs index b2e9db5c..88f0fbb1 100644 --- a/src/commands/acl.rs +++ b/src/commands/impls/acl.rs @@ -5,6 +5,8 @@ use crate::protocol::types::*; use crate::protocol::utils as protocol_utils; use crate::types::*; use crate::utils; +use bytes_utils::Str; +use redis_protocol::resp3::types::Frame; use std::sync::Arc; ok_cmd!(acl_load, AclLoad); @@ -13,18 +15,17 @@ values_cmd!(acl_list, AclList); values_cmd!(acl_users, AclUsers); value_cmd!(acl_whoami, AclWhoAmI); -pub async fn acl_setuser(inner: &Arc, username: S, rules: Vec) -> Result<(), RedisError> -where - S: Into, -{ - let username = username.into(); - +pub async fn acl_setuser( + inner: &Arc, + username: Str, + rules: Vec, +) -> Result<(), RedisError> { let frame = utils::request_response(inner, move || { let mut args = Vec::with_capacity(rules.len() + 1); args.push(username.into()); for rule in rules.into_iter() { - args.push(rule.to_string().into()); + args.push(rule.to_value()); } Ok((RedisCommandKind::AclSetUser, args)) @@ -35,19 +36,17 @@ where protocol_utils::expect_ok(&response) } -pub async fn acl_getuser(inner: &Arc, username: S) -> Result, RedisError> -where - S: Into, -{ - let username = username.into(); +pub async fn acl_getuser(inner: &Arc, username: Str) -> Result, RedisError> { let frame = utils::request_response(inner, move || Ok((RedisCommandKind::AclGetUser, vec![username.into()]))).await?; - if frame.is_null() { + if protocol_utils::is_null(&frame) { return Ok(None); } - if let Frame::Array(frames) = frame { - 
protocol_utils::parse_acl_getuser_frames(frames).map(|u| Some(u)) + let frame = protocol_utils::frame_map_or_set_to_nested_array(frame)?; + + if let Frame::Array { data, .. } = frame { + protocol_utils::parse_acl_getuser_frames(data).map(|u| Some(u)) } else { Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -56,21 +55,15 @@ where } } -pub async fn acl_deluser(inner: &Arc, usernames: S) -> Result -where - S: Into, -{ - let args: Vec = usernames.into().inner().into_iter().map(|k| k.into()).collect(); +pub async fn acl_deluser(inner: &Arc, usernames: MultipleKeys) -> Result { + let args: Vec = usernames.inner().into_iter().map(|k| k.into()).collect(); let frame = utils::request_response(inner, move || Ok((RedisCommandKind::AclDelUser, args))).await?; protocol_utils::frame_to_single_result(frame) } -pub async fn acl_cat(inner: &Arc, category: Option) -> Result -where - S: Into, -{ +pub async fn acl_cat(inner: &Arc, category: Option) -> Result { let args: Vec = if let Some(cat) = category { - vec![cat.into().into()] + vec![cat.into()] } else { Vec::new() }; @@ -91,7 +84,7 @@ pub async fn acl_genpass(inner: &Arc, bits: Option) -> Re } pub async fn acl_log_reset(inner: &Arc) -> Result<(), RedisError> { - let frame = utils::request_response(inner, || Ok((RedisCommandKind::AclLog, vec![RESET.into()]))).await?; + let frame = utils::request_response(inner, || Ok((RedisCommandKind::AclLog, vec![static_val!(RESET)]))).await?; let response = protocol_utils::frame_to_single_result(frame)?; protocol_utils::expect_ok(&response) } diff --git a/src/commands/client.rs b/src/commands/impls/client.rs similarity index 92% rename from src/commands/client.rs rename to src/commands/impls/client.rs index 0bdaa85b..3275b1d4 100644 --- a/src/commands/client.rs +++ b/src/commands/impls/client.rs @@ -4,6 +4,7 @@ use crate::protocol::types::*; use crate::protocol::utils as protocol_utils; use crate::types::*; use crate::utils; +use bytes_utils::Str; use std::sync::Arc; value_cmd!(client_id, 
ClientID); @@ -43,12 +44,12 @@ where let mut args = Vec::with_capacity(max_args); if let Some(kind) = r#type { - args.push(TYPE.into()); + args.push(static_val!(TYPE)); args.push(kind.to_str().into()); } if let Some(ids) = ids { if !ids.is_empty() { - args.push(ID.into()); + args.push(static_val!(ID)); for id in ids.into_iter() { args.push(id.into()); @@ -86,11 +87,7 @@ pub async fn client_pause( value_cmd!(client_getname, ClientGetName); -pub async fn client_setname(inner: &Arc, name: S) -> Result<(), RedisError> -where - S: Into, -{ - let name = name.into(); +pub async fn client_setname(inner: &Arc, name: Str) -> Result<(), RedisError> { _warn!(inner, "Changing client name from {} to {}", inner.id.as_str(), name); let frame = @@ -120,7 +117,7 @@ where S: Into, { let id = id.into(); - let frame = utils::backchannel_request_response(inner, move || { + let frame = utils::backchannel_request_response(inner, true, move || { let mut args = Vec::with_capacity(2); args.push(id); diff --git a/src/commands/cluster.rs b/src/commands/impls/cluster.rs similarity index 84% rename from src/commands/cluster.rs rename to src/commands/impls/cluster.rs index a587c49d..c1cd77fc 100644 --- a/src/commands/cluster.rs +++ b/src/commands/impls/cluster.rs @@ -4,6 +4,7 @@ use crate::protocol::types::*; use crate::protocol::utils as protocol_utils; use crate::types::*; use crate::utils; +use bytes_utils::Str; use std::convert::TryInto; use std::sync::Arc; @@ -39,15 +40,10 @@ where protocol_utils::expect_ok(&response) } -pub async fn cluster_count_failure_reports( +pub async fn cluster_count_failure_reports( inner: &Arc, - node_id: N, -) -> Result -where - N: Into, -{ - let node_id = node_id.into(); - + node_id: Str, +) -> Result { let frame = utils::request_response(inner, move || { Ok((RedisCommandKind::ClusterCountFailureReports, vec![node_id.into()])) }) @@ -104,11 +100,8 @@ pub async fn cluster_failover( protocol_utils::expect_ok(&response) } -pub async fn cluster_forget(inner: &Arc, 
node_id: S) -> Result<(), RedisError> -where - S: Into, -{ - one_arg_ok_cmd(inner, RedisCommandKind::ClusterForget, node_id.into().into()).await +pub async fn cluster_forget(inner: &Arc, node_id: Str) -> Result<(), RedisError> { + one_arg_ok_cmd(inner, RedisCommandKind::ClusterForget, node_id.into()).await } pub async fn cluster_get_keys_in_slot( @@ -134,30 +127,16 @@ where one_arg_value_cmd(inner, RedisCommandKind::ClusterKeySlot, key.into().into()).await } -pub async fn cluster_meet(inner: &Arc, ip: S, port: u16) -> Result<(), RedisError> -where - S: Into, -{ - args_ok_cmd( - inner, - RedisCommandKind::ClusterMeet, - vec![ip.into().into(), port.into()], - ) - .await +pub async fn cluster_meet(inner: &Arc, ip: Str, port: u16) -> Result<(), RedisError> { + args_ok_cmd(inner, RedisCommandKind::ClusterMeet, vec![ip.into(), port.into()]).await } -pub async fn cluster_replicate(inner: &Arc, node_id: S) -> Result<(), RedisError> -where - S: Into, -{ - one_arg_ok_cmd(inner, RedisCommandKind::ClusterReplicate, node_id.into().into()).await +pub async fn cluster_replicate(inner: &Arc, node_id: Str) -> Result<(), RedisError> { + one_arg_ok_cmd(inner, RedisCommandKind::ClusterReplicate, node_id.into()).await } -pub async fn cluster_replicas(inner: &Arc, node_id: S) -> Result -where - S: Into, -{ - one_arg_value_cmd(inner, RedisCommandKind::ClusterReplicas, node_id.into().into()).await +pub async fn cluster_replicas(inner: &Arc, node_id: Str) -> Result { + one_arg_value_cmd(inner, RedisCommandKind::ClusterReplicas, node_id.into()).await } pub async fn cluster_reset(inner: &Arc, mode: Option) -> Result<(), RedisError> { diff --git a/src/commands/config.rs b/src/commands/impls/config.rs similarity index 51% rename from src/commands/config.rs rename to src/commands/impls/config.rs index 3909b20e..1679052b 100644 --- a/src/commands/config.rs +++ b/src/commands/impls/config.rs @@ -2,21 +2,16 @@ use super::*; use crate::modules::inner::RedisClientInner; use 
crate::protocol::types::*; use crate::types::*; +use bytes_utils::Str; use std::sync::Arc; ok_cmd!(config_resetstat, ConfigResetStat); ok_cmd!(config_rewrite, ConfigRewrite); -pub async fn config_get(inner: &Arc, parameter: S) -> Result -where - S: Into, -{ - one_arg_values_cmd(inner, RedisCommandKind::ConfigGet, parameter.into().into()).await +pub async fn config_get(inner: &Arc, parameter: Str) -> Result { + one_arg_values_cmd(inner, RedisCommandKind::ConfigGet, parameter.into()).await } -pub async fn config_set

(inner: &Arc, parameter: P, value: RedisValue) -> Result<(), RedisError> -where - P: Into, -{ - args_ok_cmd(inner, RedisCommandKind::ConfigSet, vec![parameter.into().into(), value]).await +pub async fn config_set(inner: &Arc, parameter: Str, value: RedisValue) -> Result<(), RedisError> { + args_ok_cmd(inner, RedisCommandKind::ConfigSet, vec![parameter.into(), value]).await } diff --git a/src/commands/geo.rs b/src/commands/impls/geo.rs similarity index 87% rename from src/commands/geo.rs rename to src/commands/impls/geo.rs index 20414e6a..dc2129bf 100644 --- a/src/commands/geo.rs +++ b/src/commands/impls/geo.rs @@ -38,7 +38,7 @@ where args.push(options.to_str().into()); } if changed { - args.push(CHANGED.into()); + args.push(static_val!(CHANGED)); } for value in values.inner().into_iter() { @@ -160,30 +160,30 @@ where args.push(unit.to_str().into()); if withcoord { - args.push(WITH_COORD.into()); + args.push(static_val!(WITH_COORD)); } if withdist { - args.push(WITH_DIST.into()); + args.push(static_val!(WITH_DIST)); } if withhash { - args.push(WITH_HASH.into()); + args.push(static_val!(WITH_HASH)); } if let Some((count, any)) = count { - args.push(COUNT.into()); + args.push(static_val!(COUNT)); args.push(count.try_into()?); if any { - args.push(ANY.into()); + args.push(static_val!(ANY)); } } if let Some(ord) = ord { args.push(ord.to_str().into()); } if let Some(store) = store { - args.push(STORE.into()); + args.push(static_val!(STORE)); args.push(store.into()); } if let Some(store_dist) = storedist { - args.push(STORE_DIST.into()); + args.push(static_val!(STORE_DIST)); args.push(store_dist.into()); } @@ -221,30 +221,30 @@ where args.push(unit.to_str().into()); if withcoord { - args.push(WITH_COORD.into()); + args.push(static_val!(WITH_COORD)); } if withdist { - args.push(WITH_DIST.into()); + args.push(static_val!(WITH_DIST)); } if withhash { - args.push(WITH_HASH.into()); + args.push(static_val!(WITH_HASH)); } if let Some((count, any)) = count { - 
args.push(COUNT.into()); + args.push(static_val!(COUNT)); args.push(count.try_into()?); if any { - args.push(ANY.into()); + args.push(static_val!(ANY)); } } if let Some(ord) = ord { args.push(ord.to_str().into()); } if let Some(store) = store { - args.push(STORE.into()); + args.push(static_val!(STORE)); args.push(store.into()); } if let Some(store_dist) = storedist { - args.push(STORE_DIST.into()); + args.push(static_val!(STORE_DIST)); args.push(store_dist.into()); } @@ -278,22 +278,22 @@ where args.push(key.into()); if let Some(member) = from_member { - args.push(FROM_MEMBER.into()); + args.push(static_val!(FROM_MEMBER)); args.push(member); } if let Some(position) = from_lonlat { - args.push(FROM_LONLAT.into()); + args.push(static_val!(FROM_LONLAT)); args.push(position.longitude.try_into()?); args.push(position.latitude.try_into()?); } if let Some((radius, unit)) = by_radius { - args.push(BY_RADIUS.into()); + args.push(static_val!(BY_RADIUS)); args.push(radius.try_into()?); args.push(unit.to_str().into()); } if let Some((width, height, unit)) = by_box { - args.push(BY_BOX.into()); + args.push(static_val!(BY_BOX)); args.push(width.try_into()?); args.push(height.try_into()?); args.push(unit.to_str().into()); @@ -302,20 +302,20 @@ where args.push(ord.to_str().into()); } if let Some((count, any)) = count { - args.push(COUNT.into()); + args.push(static_val!(COUNT)); args.push(count.try_into()?); if any { - args.push(ANY.into()); + args.push(static_val!(ANY)); } } if withcoord { - args.push(WITH_COORD.into()); + args.push(static_val!(WITH_COORD)); } if withdist { - args.push(WITH_DIST.into()); + args.push(static_val!(WITH_DIST)); } if withhash { - args.push(WITH_HASH.into()); + args.push(static_val!(WITH_HASH)); } Ok((RedisCommandKind::GeoSearch, args)) @@ -348,21 +348,21 @@ where args.push(source.into()); if let Some(member) = from_member { - args.push(FROM_MEMBER.into()); + args.push(static_val!(FROM_MEMBER)); args.push(member); } if let Some(position) = from_lonlat { 
- args.push(FROM_LONLAT.into()); + args.push(static_val!(FROM_LONLAT)); args.push(position.longitude.try_into()?); args.push(position.latitude.try_into()?); } if let Some((radius, unit)) = by_radius { - args.push(BY_RADIUS.into()); + args.push(static_val!(BY_RADIUS)); args.push(radius.try_into()?); args.push(unit.to_str().into()); } if let Some((width, height, unit)) = by_box { - args.push(BY_BOX.into()); + args.push(static_val!(BY_BOX)); args.push(width.try_into()?); args.push(height.try_into()?); args.push(unit.to_str().into()); @@ -371,14 +371,14 @@ where args.push(ord.to_str().into()); } if let Some((count, any)) = count { - args.push(COUNT.into()); + args.push(static_val!(COUNT)); args.push(count.try_into()?); if any { - args.push(ANY.into()); + args.push(static_val!(ANY)); } } if storedist { - args.push(STORE_DIST.into()); + args.push(static_val!(STORE_DIST)); } Ok((RedisCommandKind::GeoSearchStore, args)) diff --git a/src/commands/hashes.rs b/src/commands/impls/hashes.rs similarity index 98% rename from src/commands/hashes.rs rename to src/commands/impls/hashes.rs index 3ab5a27d..b693096a 100644 --- a/src/commands/hashes.rs +++ b/src/commands/impls/hashes.rs @@ -201,7 +201,7 @@ where if let Some((count, with_values)) = count { args.push(count.into()); if with_values { - args.push(WITH_VALUES.into()); + args.push(static_val!(WITH_VALUES)); } } @@ -211,6 +211,7 @@ where if has_count { if has_values { + let frame = protocol_utils::flatten_frame(frame); protocol_utils::frame_to_map(frame).map(|m| RedisValue::Map(m)) } else { protocol_utils::frame_to_results(frame) diff --git a/src/commands/hyperloglog.rs b/src/commands/impls/hyperloglog.rs similarity index 100% rename from src/commands/hyperloglog.rs rename to src/commands/impls/hyperloglog.rs diff --git a/src/commands/keys.rs b/src/commands/impls/keys.rs similarity index 96% rename from src/commands/keys.rs rename to src/commands/impls/keys.rs index 2016588a..c88e1b90 100644 --- a/src/commands/keys.rs +++ 
b/src/commands/impls/keys.rs @@ -41,7 +41,7 @@ pub async fn set( args.push(options.to_str().into()); } if get { - args.push(GET.into()); + args.push(static_val!(GET)); } Ok((RedisCommandKind::Set, args)) @@ -212,17 +212,17 @@ where args.push(serialized); if replace { - args.push(REPLACE.into()); + args.push(static_val!(REPLACE)); } if absttl { - args.push(ABSTTL.into()); + args.push(static_val!(ABSTTL)); } if let Some(idletime) = idletime { - args.push(IDLE_TIME.into()); + args.push(static_val!(IDLE_TIME)); args.push(idletime.into()); } if let Some(frequency) = frequency { - args.push(FREQ.into()); + args.push(static_val!(FREQ)); args.push(frequency.into()); } @@ -294,10 +294,7 @@ where one_arg_value_cmd(inner, RedisCommandKind::Strlen, key.into().into()).await } -pub async fn mget(inner: &Arc, keys: K) -> Result -where - K: Into, -{ +pub async fn mget(inner: &Arc, keys: MultipleKeys) -> Result { let keys = keys.into(); utils::check_empty_keys(&keys)?; @@ -387,11 +384,11 @@ where args.push(destination.into()); if let Some(db) = db { - args.push(DB.into()); + args.push(static_val!(DB)); args.push(db.into()); } if replace { - args.push(REPLACE.into()); + args.push(static_val!(REPLACE)); } Ok((RedisCommandKind::Copy, args)) diff --git a/src/commands/lists.rs b/src/commands/impls/lists.rs similarity index 98% rename from src/commands/lists.rs rename to src/commands/impls/lists.rs index 69afbedf..96271a02 100644 --- a/src/commands/lists.rs +++ b/src/commands/impls/lists.rs @@ -179,15 +179,15 @@ where args.push(element); if let Some(rank) = rank { - args.push(RANK.into()); + args.push(static_val!(RANK)); args.push(rank.into()); } if let Some(count) = count { - args.push(COUNT.into()); + args.push(static_val!(COUNT)); args.push(count.into()); } if let Some(maxlen) = maxlen { - args.push(MAXLEN.into()); + args.push(static_val!(MAXLEN)); args.push(maxlen.into()); } diff --git a/src/commands/lua.rs b/src/commands/impls/lua.rs similarity index 81% rename from 
src/commands/lua.rs rename to src/commands/impls/lua.rs index db55ebce..ce35693a 100644 --- a/src/commands/lua.rs +++ b/src/commands/impls/lua.rs @@ -1,11 +1,12 @@ use super::*; -use crate::client::util::sha1_hash; use crate::error::*; use crate::modules::inner::RedisClientInner; use crate::protocol::types::*; use crate::protocol::utils as protocol_utils; use crate::types::*; +use crate::util::sha1_hash; use crate::utils; +use bytes_utils::Str; use std::convert::TryInto; use std::sync::Arc; use tokio::sync::oneshot::channel as oneshot_channel; @@ -20,13 +21,7 @@ pub fn check_key_slot(inner: &Arc, keys: &Vec) -> Re let (mut cmd_server, mut cmd_slot) = (None, None); for key in keys.iter() { - let key_slot = match key.as_str() { - Some(k) => redis_protocol::redis_keyslot(k), - None => { - let key_str = String::from_utf8_lossy(key.as_bytes()); - redis_protocol::redis_keyslot(&key_str) - } - }; + let key_slot = redis_keyslot(key.as_bytes()); if let Some(slot) = cluster_state.get_server(key_slot) { if let Some(ref cmd_server) = cmd_server { @@ -54,19 +49,12 @@ pub fn check_key_slot(inner: &Arc, keys: &Vec) -> Re } } -pub async fn script_load(inner: &Arc, script: S) -> Result -where - S: Into, -{ - one_arg_value_cmd(inner, RedisCommandKind::ScriptLoad, script.into().into()).await +pub async fn script_load(inner: &Arc, script: Str) -> Result { + one_arg_value_cmd(inner, RedisCommandKind::ScriptLoad, script.into()).await } -pub async fn script_load_cluster(inner: &Arc, script: S) -> Result -where - S: Into, -{ +pub async fn script_load_cluster(inner: &Arc, script: Str) -> Result { let _ = utils::check_clustered(inner)?; - let script = script.into(); let hash = sha1_hash(&script); let (tx, rx) = oneshot_channel(); @@ -94,8 +82,8 @@ pub async fn script_kill_cluster(inner: &Arc) -> Result<(), Re pub async fn script_flush(inner: &Arc, r#async: bool) -> Result<(), RedisError> { let frame = utils::request_response(inner, move || { - let arg = if r#async { ASYNC } else { SYNC }; - 
Ok((RedisCommandKind::ScriptFlush, vec![arg.into()])) + let arg = static_val!(if r#async { ASYNC } else { SYNC }); + Ok((RedisCommandKind::ScriptFlush, vec![arg])) }) .await?; @@ -108,8 +96,8 @@ pub async fn script_flush_cluster(inner: &Arc, r#async: bool) let (tx, rx) = oneshot_channel(); let kind = RedisCommandKind::_ScriptFlushCluster(AllNodesResponse::new(tx)); - let arg = if r#async { ASYNC } else { SYNC }; - let command = RedisCommand::new(kind, vec![arg.into()], None); + let arg = static_val!(if r#async { ASYNC } else { SYNC }); + let command = RedisCommand::new(kind, vec![arg], None); let _ = utils::send_command(inner, command)?; let _ = rx.await??; @@ -160,17 +148,13 @@ pub async fn script_debug(inner: &Arc, flag: ScriptDebugFlag) protocol_utils::expect_ok(&response) } -pub async fn evalsha( +pub async fn evalsha( inner: &Arc, - hash: S, - keys: K, + hash: Str, + keys: MultipleKeys, cmd_args: MultipleValues, -) -> Result -where - S: Into, - K: Into, -{ - let (hash, keys) = (hash.into(), keys.into().inner()); +) -> Result { + let keys = keys.inner(); let custom_key_slot = check_key_slot(inner, &keys)?; let frame = utils::request_response(inner, move || { @@ -192,17 +176,13 @@ where protocol_utils::frame_to_results(frame) } -pub async fn eval( +pub async fn eval( inner: &Arc, - script: S, - keys: K, + script: Str, + keys: MultipleKeys, cmd_args: MultipleValues, -) -> Result -where - S: Into, - K: Into, -{ - let (script, keys) = (script.into(), keys.into().inner()); +) -> Result { + let keys = keys.inner(); let custom_key_slot = check_key_slot(inner, &keys)?; let frame = utils::request_response(inner, move || { diff --git a/src/commands/memory.rs b/src/commands/impls/memory.rs similarity index 86% rename from src/commands/memory.rs rename to src/commands/impls/memory.rs index f2e1791c..f3eea07c 100644 --- a/src/commands/memory.rs +++ b/src/commands/impls/memory.rs @@ -5,6 +5,7 @@ use crate::protocol::types::*; use crate::protocol::utils as protocol_utils; use 
crate::types::*; use crate::utils; +use redis_protocol::resp3::types::Frame; use std::sync::Arc; pub async fn memory_doctor(inner: &Arc) -> Result { @@ -28,9 +29,11 @@ pub async fn memory_malloc_stats(inner: &Arc) -> Result) -> Result { - if let Frame::Array(frames) = utils::request_response(inner, || Ok((RedisCommandKind::MemoryStats, vec![]))).await? - { - protocol_utils::parse_memory_stats(&frames) + let response = utils::request_response(inner, || Ok((RedisCommandKind::MemoryStats, vec![]))).await?; + + let frame = protocol_utils::frame_map_or_set_to_nested_array(response)?; + if let Frame::Array { data, .. } = frame { + protocol_utils::parse_memory_stats(&data) } else { Err(RedisError::new( RedisErrorKind::ProtocolError, diff --git a/src/commands/impls/mod.rs b/src/commands/impls/mod.rs new file mode 100644 index 00000000..725953f7 --- /dev/null +++ b/src/commands/impls/mod.rs @@ -0,0 +1,174 @@ +use crate::error::RedisError; +use crate::modules::inner::RedisClientInner; +use crate::protocol::types::RedisCommandKind; +use crate::protocol::utils as protocol_utils; +use crate::types::RedisValue; +use crate::utils; +use std::sync::Arc; + +pub static MATCH: &'static str = "MATCH"; +pub static COUNT: &'static str = "COUNT"; +pub static TYPE: &'static str = "TYPE"; +pub static CHANGED: &'static str = "CH"; +pub static INCR: &'static str = "INCR"; +pub static WITH_SCORES: &'static str = "WITHSCORES"; +pub static LIMIT: &'static str = "LIMIT"; +pub static AGGREGATE: &'static str = "AGGREGATE"; +pub static WEIGHTS: &'static str = "WEIGHTS"; +pub static GET: &'static str = "GET"; +pub static RESET: &'static str = "RESET"; +pub static TO: &'static str = "TO"; +pub static FORCE: &'static str = "FORCE"; +pub static ABORT: &'static str = "ABORT"; +pub static TIMEOUT: &'static str = "TIMEOUT"; +pub static LEN: &'static str = "LEN"; +pub static DB: &'static str = "DB"; +pub static REPLACE: &'static str = "REPLACE"; +pub static ID: &'static str = "ID"; +pub static ANY: &'static 
str = "ANY"; +pub static STORE: &'static str = "STORE"; +pub static WITH_VALUES: &'static str = "WITHVALUES"; +pub static SYNC: &'static str = "SYNC"; +pub static ASYNC: &'static str = "ASYNC"; +pub static RANK: &'static str = "RANK"; +pub static MAXLEN: &'static str = "MAXLEN"; +pub static REV: &'static str = "REV"; +pub static ABSTTL: &'static str = "ABSTTL"; +pub static IDLE_TIME: &'static str = "IDLETIME"; +pub static FREQ: &'static str = "FREQ"; +pub static FULL: &'static str = "FULL"; +pub static NOMKSTREAM: &'static str = "NOMKSTREAM"; +pub static MINID: &'static str = "MINID"; +pub static BLOCK: &'static str = "BLOCK"; +pub static STREAMS: &'static str = "STREAMS"; +pub static MKSTREAM: &'static str = "MKSTREAM"; +pub static GROUP: &'static str = "GROUP"; +pub static NOACK: &'static str = "NOACK"; +pub static IDLE: &'static str = "IDLE"; +pub static TIME: &'static str = "TIME"; +pub static RETRYCOUNT: &'static str = "RETRYCOUNT"; +pub static JUSTID: &'static str = "JUSTID"; + +/// Macro to generate a command function that takes no arguments and expects an OK response - returning `()` to the caller. +macro_rules! ok_cmd( + ($name:ident, $cmd:tt) => { + pub async fn $name(inner: &Arc) -> Result<(), RedisError> { + let frame = crate::utils::request_response(inner, || Ok((RedisCommandKind::$cmd, vec![]))).await?; + let response = crate::protocol::utils::frame_to_single_result(frame)?; + crate::protocol::utils::expect_ok(&response) + } + } +); + +/// Macro to generate a command function that takes no arguments and returns a single `RedisValue` to the caller. +macro_rules! 
simple_cmd( + ($name:ident, $cmd:tt, $res:ty) => { + pub async fn $name(inner: &Arc) -> Result<$res, RedisError> { + let frame = crate::utils::request_response(inner, || Ok((RedisCommandKind::$cmd, vec![]))).await?; + crate::protocol::utils::frame_to_single_result(frame) + } + } +); + +/// Macro to generate a command function that takes no arguments and returns a single `RedisValue` to the caller. +macro_rules! value_cmd( + ($name:ident, $cmd:tt) => { + simple_cmd!($name, $cmd, RedisValue); + } +); + +/// Macro to generate a command function that takes no arguments and returns a potentially nested `RedisValue` to the caller. +macro_rules! values_cmd( + ($name:ident, $cmd:tt) => { + pub async fn $name(inner: &Arc) -> Result { + let frame = crate::utils::request_response(inner, || Ok((RedisCommandKind::$cmd, vec![]))).await?; + crate::protocol::utils::frame_to_results(frame) + } + } +); + +/// A function that issues a command that only takes one argument and returns a single `RedisValue`. +pub async fn one_arg_value_cmd( + inner: &Arc, + kind: RedisCommandKind, + arg: RedisValue, +) -> Result { + let frame = utils::request_response(inner, move || Ok((kind, vec![arg]))).await?; + protocol_utils::frame_to_single_result(frame) +} + +/// A function that issues a command that only takes one argument and returns a potentially nested `RedisValue`. +pub async fn one_arg_values_cmd( + inner: &Arc, + kind: RedisCommandKind, + arg: RedisValue, +) -> Result { + let frame = utils::request_response(inner, move || Ok((kind, vec![arg]))).await?; + protocol_utils::frame_to_results(frame) +} + +/// A function that issues a command that only takes one argument and expects an OK response - returning `()` to the caller. 
+pub async fn one_arg_ok_cmd( + inner: &Arc, + kind: RedisCommandKind, + arg: RedisValue, +) -> Result<(), RedisError> { + let frame = utils::request_response(inner, move || Ok((kind, vec![arg]))).await?; + + let response = protocol_utils::frame_to_single_result(frame)?; + protocol_utils::expect_ok(&response) +} + +/// A function that issues a command that takes any number of arguments and returns a single `RedisValue` to the caller. +pub async fn args_value_cmd( + inner: &Arc, + kind: RedisCommandKind, + args: Vec, +) -> Result { + let frame = utils::request_response(inner, move || Ok((kind, args))).await?; + protocol_utils::frame_to_single_result(frame) +} + +/// A function that issues a command that takes any number of arguments and returns a potentially nested `RedisValue` to the caller. +pub async fn args_values_cmd( + inner: &Arc, + kind: RedisCommandKind, + args: Vec, +) -> Result { + let frame = utils::request_response(inner, move || Ok((kind, args))).await?; + protocol_utils::frame_to_results(frame) +} + +/// A function that issues a command that takes any number of arguments and expects an OK response - returning `()` to the caller. 
+pub async fn args_ok_cmd( + inner: &Arc, + kind: RedisCommandKind, + args: Vec, +) -> Result<(), RedisError> { + let frame = utils::request_response(inner, move || Ok((kind, args))).await?; + let response = protocol_utils::frame_to_single_result(frame)?; + protocol_utils::expect_ok(&response) +} + +pub mod acl; +pub mod client; +pub mod cluster; +pub mod config; +pub mod geo; +pub mod hashes; +pub mod hyperloglog; +pub mod keys; +pub mod lists; +pub mod lua; +pub mod memory; +pub mod pubsub; +pub mod scan; +pub mod server; +pub mod sets; +pub mod slowlog; +pub mod sorted_sets; +pub mod streams; +pub mod strings; + +#[cfg(feature = "sentinel-client")] +pub mod sentinel; diff --git a/src/commands/pubsub.rs b/src/commands/impls/pubsub.rs similarity index 88% rename from src/commands/pubsub.rs rename to src/commands/impls/pubsub.rs index 4cbcac7f..23d47072 100644 --- a/src/commands/pubsub.rs +++ b/src/commands/impls/pubsub.rs @@ -5,16 +5,14 @@ use crate::protocol::types::*; use crate::protocol::utils as protocol_utils; use crate::types::*; use crate::utils; +use bytes_utils::Str; use std::collections::VecDeque; use std::sync::Arc; -pub async fn subscribe(inner: &Arc, channel: S) -> Result -where - S: Into, -{ +pub async fn subscribe(inner: &Arc, channel: Str) -> Result { // note: if this ever changes to take in more than one channel then some additional work must be done // in the multiplexer to associate multiple responses with a single request - let results = one_arg_values_cmd(inner, RedisCommandKind::Subscribe, channel.into().into()).await?; + let results = one_arg_values_cmd(inner, RedisCommandKind::Subscribe, channel.into()).await?; // last value in the array is number of channels if let RedisValue::Array(mut values) = results { @@ -34,13 +32,10 @@ where } } -pub async fn unsubscribe(inner: &Arc, channel: S) -> Result -where - S: Into, -{ +pub async fn unsubscribe(inner: &Arc, channel: Str) -> Result { // note: if this ever changes to take in more than one 
channel then some additional work must be done // in the multiplexer to associate multiple responses with a single request - let results = one_arg_values_cmd(inner, RedisCommandKind::Unsubscribe, channel.into().into()).await?; + let results = one_arg_values_cmd(inner, RedisCommandKind::Unsubscribe, channel.into()).await?; // last value in the array is number of channels if let RedisValue::Array(mut values) = results { @@ -56,15 +51,11 @@ where } } -pub async fn publish( +pub async fn publish( inner: &Arc, - channel: S, + channel: Str, message: RedisValue, -) -> Result -where - S: Into, -{ - let channel = channel.into(); +) -> Result { let frame = utils::request_response(inner, move || { Ok((RedisCommandKind::Publish, vec![channel.into(), message])) }) diff --git a/src/commands/scan.rs b/src/commands/impls/scan.rs similarity index 64% rename from src/commands/scan.rs rename to src/commands/impls/scan.rs index 60dd072f..b2c6349a 100644 --- a/src/commands/scan.rs +++ b/src/commands/impls/scan.rs @@ -4,6 +4,7 @@ use crate::modules::inner::RedisClientInner; use crate::protocol::types::*; use crate::types::*; use crate::utils; +use bytes_utils::Str; use futures::stream::{Stream, TryStreamExt}; use std::sync::Arc; use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; @@ -11,15 +12,15 @@ use tokio_stream::wrappers::UnboundedReceiverStream; static STARTING_CURSOR: &'static str = "0"; -fn values_args(key: RedisKey, pattern: String, count: Option) -> Vec { +fn values_args(key: RedisKey, pattern: Str, count: Option) -> Vec { let mut args = Vec::with_capacity(6); args.push(key.into()); - args.push(STARTING_CURSOR.into()); - args.push(MATCH.into()); + args.push(static_val!(STARTING_CURSOR)); + args.push(static_val!(MATCH)); args.push(pattern.into()); if let Some(count) = count { - args.push(COUNT.into()); + args.push(static_val!(COUNT)); args.push(count.into()); } @@ -33,15 +34,68 @@ fn early_error(tx: &UnboundedSender>, e }); } -pub fn scan( +pub fn scan_cluster( inner: 
&Arc, - pattern: S, + pattern: Str, count: Option, r#type: Option, -) -> impl Stream> -where - S: Into, -{ +) -> impl Stream> { + let (tx, rx) = unbounded_channel(); + let err_tx = tx.clone(); + if let Err(e) = utils::disallow_during_transaction(inner) { + let _ = tokio::spawn(async move { + let _ = err_tx.send(Err(e)); + }); + return UnboundedReceiverStream::new(rx); + } + + let hash_slots: Vec = if let Some(ref state) = *inner.cluster_state.read() { + state.unique_hash_slots() + } else { + early_error( + &tx, + RedisError::new(RedisErrorKind::Cluster, "Invalid or missing cluster state."), + ); + return UnboundedReceiverStream::new(rx); + }; + + let mut args = Vec::with_capacity(7); + args.push(static_val!(STARTING_CURSOR)); + args.push(static_val!(MATCH)); + args.push(pattern.into()); + + if let Some(count) = count { + args.push(static_val!(COUNT)); + args.push(count.into()); + } + if let Some(r#type) = r#type { + args.push(static_val!(TYPE)); + args.push(r#type.to_str().into()); + } + + for slot in hash_slots.into_iter() { + let scan_inner = KeyScanInner { + key_slot: Some(slot), + tx: tx.clone(), + cursor: utils::static_str(STARTING_CURSOR), + }; + let cmd = RedisCommand::new(RedisCommandKind::Scan(scan_inner), args.clone(), None); + + if let Err(e) = utils::send_command(inner, cmd) { + early_error(&tx, e); + break; + } + } + + UnboundedReceiverStream::new(rx) +} + +pub fn scan( + inner: &Arc, + pattern: Str, + count: Option, + r#type: Option, +) -> impl Stream> { let (tx, rx) = unbounded_channel(); let err_tx = tx.clone(); if let Err(e) = utils::disallow_during_transaction(inner) { @@ -51,10 +105,9 @@ where return UnboundedReceiverStream::new(rx); } - let pattern = pattern.into(); let key_slot = if utils::is_clustered(&inner.config) { if utils::clustered_scan_pattern_has_hash_tag(inner, &pattern) { - Some(redis_keyslot(&pattern)) + Some(redis_keyslot(pattern.as_bytes())) } else { None } @@ -63,16 +116,16 @@ where }; let mut args = Vec::with_capacity(7); - 
args.push(STARTING_CURSOR.into()); - args.push(MATCH.into()); + args.push(static_val!(STARTING_CURSOR)); + args.push(static_val!(MATCH)); args.push(pattern.into()); if let Some(count) = count { - args.push(COUNT.into()); + args.push(static_val!(COUNT)); args.push(count.into()); } if let Some(r#type) = r#type { - args.push(TYPE.into()); + args.push(static_val!(TYPE)); args.push(r#type.to_str().into()); } @@ -80,7 +133,7 @@ where let scan = KeyScanInner { key_slot, tx, - cursor: STARTING_CURSOR.into(), + cursor: utils::static_str(STARTING_CURSOR), }; let cmd = RedisCommand::new(RedisCommandKind::Scan(scan), args, None); @@ -93,15 +146,14 @@ where UnboundedReceiverStream::new(rx) } -pub fn hscan( +pub fn hscan( inner: &Arc, key: K, - pattern: P, + pattern: Str, count: Option, ) -> impl Stream> where K: Into, - P: Into, { let (tx, rx) = unbounded_channel(); let should_send = if let Err(e) = utils::disallow_during_transaction(inner) { @@ -112,12 +164,11 @@ where }; if should_send { - let (key, pattern) = (key.into(), pattern.into()); - let args = values_args(key, pattern, count); + let args = values_args(key.into(), pattern, count); let err_tx = tx.clone(); let scan = ValueScanInner { tx, - cursor: STARTING_CURSOR.into(), + cursor: utils::static_str(STARTING_CURSOR), }; let cmd = RedisCommand::new(RedisCommandKind::Hscan(scan), args, None); @@ -136,15 +187,14 @@ where }) } -pub fn sscan( +pub fn sscan( inner: &Arc, key: K, - pattern: P, + pattern: Str, count: Option, ) -> impl Stream> where K: Into, - P: Into, { let (tx, rx) = unbounded_channel(); let should_send = if let Err(e) = utils::disallow_during_transaction(inner) { @@ -155,12 +205,11 @@ where }; if should_send { - let (key, pattern) = (key.into(), pattern.into()); - let args = values_args(key, pattern, count); + let args = values_args(key.into(), pattern, count); let err_tx = tx.clone(); let scan = ValueScanInner { tx, - cursor: STARTING_CURSOR.into(), + cursor: utils::static_str(STARTING_CURSOR), }; let cmd = 
RedisCommand::new(RedisCommandKind::Sscan(scan), args, None); @@ -179,15 +228,14 @@ where }) } -pub fn zscan( +pub fn zscan( inner: &Arc, key: K, - pattern: P, + pattern: Str, count: Option, ) -> impl Stream> where K: Into, - P: Into, { let (tx, rx) = unbounded_channel(); let should_send = if let Err(e) = utils::disallow_during_transaction(inner) { @@ -198,12 +246,11 @@ where }; if should_send { - let (key, pattern) = (key.into(), pattern.into()); - let args = values_args(key, pattern, count); + let args = values_args(key.into(), pattern, count); let err_tx = tx.clone(); let scan = ValueScanInner { tx, - cursor: STARTING_CURSOR.into(), + cursor: utils::static_str(STARTING_CURSOR), }; let cmd = RedisCommand::new(RedisCommandKind::Zscan(scan), args, None); diff --git a/src/commands/impls/sentinel.rs b/src/commands/impls/sentinel.rs new file mode 100644 index 00000000..fce46a1a --- /dev/null +++ b/src/commands/impls/sentinel.rs @@ -0,0 +1,188 @@ +use super::*; +use crate::error::RedisError; +use crate::modules::inner::RedisClientInner; +use crate::protocol::types::*; +use crate::protocol::utils as protocol_utils; +use crate::types::*; +use crate::utils; +use bytes_utils::Str; +use std::net::IpAddr; +use std::sync::Arc; + +pub async fn config_get(inner: &Arc, name: Str) -> Result { + let frame = utils::request_response(inner, move || { + let args = vec![static_val!("CONFIG"), static_val!(GET), name.into()]; + Ok((RedisCommandKind::Sentinel, args)) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn config_set( + inner: &Arc, + name: Str, + value: RedisValue, +) -> Result { + let frame = utils::request_response(inner, move || { + Ok(( + RedisCommandKind::Sentinel, + vec![static_val!("CONFIG"), static_val!("SET"), name.into(), value], + )) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn ckquorum(inner: &Arc, name: Str) -> Result { + let frame = utils::request_response(inner, move || { + 
Ok((RedisCommandKind::Sentinel, vec![static_val!("CKQUORUM"), name.into()])) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn flushconfig(inner: &Arc) -> Result { + args_values_cmd(inner, RedisCommandKind::Sentinel, vec![static_val!("FLUSHCONFIG")]).await +} + +pub async fn failover(inner: &Arc, name: Str) -> Result { + let frame = utils::request_response(inner, move || { + Ok((RedisCommandKind::Sentinel, vec![static_val!("FAILOVER"), name.into()])) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn get_master_addr_by_name(inner: &Arc, name: Str) -> Result { + let frame = utils::request_response(inner, move || { + Ok(( + RedisCommandKind::Sentinel, + vec![static_val!("GET-MASTER-ADDR-BY-NAME"), name.into()], + )) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn info_cache(inner: &Arc) -> Result { + args_values_cmd(inner, RedisCommandKind::Sentinel, vec![static_val!("INFO-CACHE")]).await +} + +pub async fn masters(inner: &Arc) -> Result { + args_values_cmd(inner, RedisCommandKind::Sentinel, vec![static_val!("MASTERS")]).await +} + +pub async fn master(inner: &Arc, name: Str) -> Result { + let frame = utils::request_response(inner, move || { + Ok((RedisCommandKind::Sentinel, vec![static_val!("MASTER"), name.into()])) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn monitor( + inner: &Arc, + name: Str, + ip: IpAddr, + port: u16, + quorum: u32, +) -> Result { + let ip = ip.to_string(); + let frame = utils::request_response(inner, move || { + Ok(( + RedisCommandKind::Sentinel, + vec![ + static_val!("MONITOR"), + name.into(), + ip.into(), + port.into(), + quorum.into(), + ], + )) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn myid(inner: &Arc) -> Result { + args_values_cmd(inner, RedisCommandKind::Sentinel, vec![static_val!("MYID")]).await +} + +pub async fn pending_scripts(inner: &Arc) -> Result { + args_values_cmd(inner, 
RedisCommandKind::Sentinel, vec![static_val!("PENDING-SCRIPTS")]).await +} + +pub async fn remove(inner: &Arc, name: Str) -> Result { + let frame = utils::request_response(inner, move || { + Ok((RedisCommandKind::Sentinel, vec![static_val!("REMOVE"), name.into()])) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn replicas(inner: &Arc, name: Str) -> Result { + let frame = utils::request_response(inner, move || { + Ok((RedisCommandKind::Sentinel, vec![static_val!("REPLICAS"), name.into()])) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn sentinels(inner: &Arc, name: Str) -> Result { + let frame = utils::request_response(inner, move || { + Ok((RedisCommandKind::Sentinel, vec![static_val!("SENTINELS"), name.into()])) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn set(inner: &Arc, name: Str, options: RedisMap) -> Result { + let frame = utils::request_response(inner, move || { + let mut args = Vec::with_capacity(2 + options.len()); + args.push(static_val!("SET")); + args.push(name.into()); + + for (key, value) in options.inner().into_iter() { + args.push(key.into()); + args.push(value); + } + Ok((RedisCommandKind::Sentinel, args)) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn simulate_failure( + inner: &Arc, + kind: SentinelFailureKind, +) -> Result { + let frame = utils::request_response(inner, move || { + Ok(( + RedisCommandKind::Sentinel, + vec![static_val!("SIMULATE-FAILURE"), kind.to_str().into()], + )) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn reset(inner: &Arc, pattern: Str) -> Result { + let frame = utils::request_response(inner, move || { + Ok((RedisCommandKind::Sentinel, vec![static_val!("RESET"), pattern.into()])) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} diff --git a/src/commands/server.rs b/src/commands/impls/server.rs similarity index 80% rename from src/commands/server.rs 
rename to src/commands/impls/server.rs index c4cee610..e96ba959 100644 --- a/src/commands/server.rs +++ b/src/commands/impls/server.rs @@ -1,11 +1,13 @@ use super::*; -use crate::client::RedisClient; +use crate::clients::RedisClient; use crate::error::*; use crate::modules::inner::RedisClientInner; +use crate::prelude::Resp3Frame; use crate::protocol::types::*; use crate::protocol::utils as protocol_utils; use crate::types::*; use crate::utils; +use bytes_utils::Str; use parking_lot::RwLock; use std::sync::Arc; use tokio::sync::oneshot::channel as oneshot_channel; @@ -68,7 +70,7 @@ pub async fn split(inner: &Arc) -> Result, Re } pub async fn flushall(inner: &Arc, r#async: bool) -> Result { - let args = if r#async { vec![ASYNC.into()] } else { Vec::new() }; + let args = if r#async { vec![static_val!(ASYNC)] } else { Vec::new() }; let frame = utils::request_response(inner, move || Ok((RedisCommandKind::FlushAll, args))).await?; protocol_utils::frame_to_single_result(frame) @@ -102,7 +104,7 @@ pub async fn select(inner: &Arc, db: u8) -> Result, section: Option) -> Result { let frame = utils::request_response(inner, move || { - let mut args = Vec::new(); + let mut args = Vec::with_capacity(1); if let Some(section) = section { args.push(section.to_str().into()); } @@ -131,12 +133,33 @@ pub async fn discard(inner: &Arc) -> Result<(), RedisError> { Ok(()) } -pub async fn auth(inner: &Arc, username: Option, password: V) -> Result<(), RedisError> -where - V: Into, -{ - let password = password.into(); +pub async fn hello( + inner: &Arc, + version: RespVersion, + auth: Option<(String, String)>, +) -> Result<(), RedisError> { + let args = if let Some((username, password)) = auth { + vec![username.into(), password.into()] + } else { + vec![] + }; + if utils::is_clustered(&inner.config) { + let (tx, rx) = oneshot_channel(); + let kind = RedisCommandKind::_HelloAllCluster((AllNodesResponse::new(tx), version)); + let command = RedisCommand::new(kind, args, None); + let _ = 
utils::send_command(inner, command)?; + let _ = rx.await??; + + Ok(()) + } else { + let frame = utils::request_response(inner, move || Ok((RedisCommandKind::Hello(version), args))).await?; + let _ = protocol_utils::frame_to_results(frame)?; + Ok(()) + } +} + +pub async fn auth(inner: &Arc, username: Option, password: Str) -> Result<(), RedisError> { if utils::is_clustered(&inner.config) { let mut args = Vec::with_capacity(2); if let Some(username) = username { @@ -176,6 +199,14 @@ pub async fn custom( args_values_cmd(inner, RedisCommandKind::_Custom(cmd), args).await } +pub async fn custom_raw( + inner: &Arc, + cmd: CustomCommand, + args: Vec, +) -> Result { + utils::request_response(inner, move || Ok((RedisCommandKind::_Custom(cmd), args))).await +} + value_cmd!(dbsize, DBSize); value_cmd!(bgrewriteaof, BgreWriteAof); value_cmd!(bgsave, BgSave); @@ -190,18 +221,18 @@ pub async fn failover( let frame = utils::request_response(inner, move || { let mut args = Vec::with_capacity(7); if let Some((host, port)) = to { - args.push(TO.into()); + args.push(static_val!(TO)); args.push(host.into()); args.push(port.into()); } if force { - args.push(FORCE.into()); + args.push(static_val!(FORCE)); } if abort { - args.push(ABORT.into()); + args.push(static_val!(ABORT)); } if let Some(timeout) = timeout { - args.push(TIMEOUT.into()); + args.push(static_val!(TIMEOUT)); args.push(timeout.into()); } diff --git a/src/commands/sets.rs b/src/commands/impls/sets.rs similarity index 100% rename from src/commands/sets.rs rename to src/commands/impls/sets.rs diff --git a/src/commands/slowlog.rs b/src/commands/impls/slowlog.rs similarity index 83% rename from src/commands/slowlog.rs rename to src/commands/impls/slowlog.rs index 6808ee2d..bacfbb82 100644 --- a/src/commands/slowlog.rs +++ b/src/commands/impls/slowlog.rs @@ -5,12 +5,13 @@ use crate::protocol::types::*; use crate::protocol::utils as protocol_utils; use crate::types::*; use crate::utils; +use redis_protocol::resp3::types::Frame; 
use std::sync::Arc; pub async fn slowlog_get(inner: &Arc, count: Option) -> Result, RedisError> { let frame = utils::request_response(inner, move || { let mut args = Vec::with_capacity(2); - args.push(GET.into()); + args.push(static_val!(GET)); if let Some(count) = count { args.push(count.into()); @@ -20,8 +21,8 @@ pub async fn slowlog_get(inner: &Arc, count: Option) -> R }) .await?; - if let Frame::Array(frames) = frame { - protocol_utils::parse_slowlog_entries(frames) + if let Frame::Array { data, .. } = frame { + protocol_utils::parse_slowlog_entries(data) } else { Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -45,5 +46,5 @@ pub async fn slowlog_length(inner: &Arc) -> Result) -> Result<(), RedisError> { - args_ok_cmd(inner, RedisCommandKind::Slowlog, vec![RESET.into()]).await + args_ok_cmd(inner, RedisCommandKind::Slowlog, vec![static_val!(RESET)]).await } diff --git a/src/commands/sorted_sets.rs b/src/commands/impls/sorted_sets.rs similarity index 92% rename from src/commands/sorted_sets.rs rename to src/commands/impls/sorted_sets.rs index 1aeda337..dc0cb46e 100644 --- a/src/commands/sorted_sets.rs +++ b/src/commands/impls/sorted_sets.rs @@ -5,6 +5,7 @@ use crate::protocol::types::*; use crate::protocol::utils as protocol_utils; use crate::types::*; use crate::utils; +use redis_protocol::resp3::types::Frame; use std::convert::TryInto; use std::str; use std::sync::Arc; @@ -66,7 +67,9 @@ fn frames_to_bzpop_result(mut frames: Vec) -> Result bytes_to_f64(&b)?, + Frame::SimpleString { data, .. } => bytes_to_f64(&data)?, + Frame::BlobString { data, .. } => bytes_to_f64(&data)?, + Frame::Double { data, .. } => data, _ => { return Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -76,7 +79,8 @@ fn frames_to_bzpop_result(mut frames: Vec) -> Result String::from_utf8(b)?.into(), + Frame::SimpleString { data, .. } => data.into(), + Frame::BlobString { data, .. 
} => data.into(), _ => { return Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -109,10 +113,10 @@ where }) .await?; - if let Frame::Array(frames) = frame { - frames_to_bzpop_result(frames) + if let Frame::Array { data, .. } = frame { + frames_to_bzpop_result(data) } else { - if frame.is_null() { + if protocol_utils::is_null(&frame) { Ok(None) } else { Err(RedisError::new(RedisErrorKind::ProtocolError, "Expected nil or array.")) @@ -141,10 +145,10 @@ where }) .await?; - if let Frame::Array(frames) = frame { - frames_to_bzpop_result(frames) + if let Frame::Array { data, .. } = frame { + frames_to_bzpop_result(data) } else { - if frame.is_null() { + if protocol_utils::is_null(&frame) { Ok(None) } else { Err(RedisError::new(RedisErrorKind::ProtocolError, "Expected nil or array.")) @@ -177,10 +181,10 @@ where args.push(ordering.to_str().into()); } if changed { - args.push(CHANGED.into()); + args.push(static_val!(CHANGED)); } if incr { - args.push(INCR.into()); + args.push(static_val!(INCR)); } for (score, value) in values.inner().into_iter() { @@ -223,7 +227,7 @@ where args.push(key.into()); } if withscores { - args.push(WITH_SCORES.into()); + args.push(static_val!(WITH_SCORES)); } Ok((RedisCommandKind::Zdiff, args)) @@ -289,17 +293,17 @@ where args.push(key.into()); } if weights.len() > 0 { - args.push(WEIGHTS.into()); + args.push(static_val!(WEIGHTS)); for weight in weights.inner().into_iter() { args.push(weight.try_into()?); } } if let Some(options) = aggregate { - args.push(AGGREGATE.into()); + args.push(static_val!(AGGREGATE)); args.push(options.to_str().into()); } if withscores { - args.push(WITH_SCORES.into()); + args.push(static_val!(WITH_SCORES)); } Ok((RedisCommandKind::Zinter, args)) @@ -332,13 +336,13 @@ where args.push(key.into()); } if weights.len() > 0 { - args.push(WEIGHTS.into()); + args.push(static_val!(WEIGHTS)); for weight in weights.inner().into_iter() { args.push(weight.try_into()?); } } if let Some(options) = aggregate { - 
args.push(AGGREGATE.into()); + args.push(static_val!(AGGREGATE)); args.push(options.to_str().into()); } @@ -407,7 +411,7 @@ where if let Some((count, withscores)) = count { args.push(count.into()); if withscores { - args.push(WITH_SCORES.into()); + args.push(static_val!(WITH_SCORES)); } } @@ -446,10 +450,10 @@ where args.push(sort.to_str().into()); } if rev { - args.push(REV.into()); + args.push(static_val!(REV)); } if let Some((offset, count)) = limit { - args.push(LIMIT.into()); + args.push(static_val!(LIMIT)); args.push(offset.into()); args.push(count.into()); } @@ -487,15 +491,15 @@ where args.push(sort.to_str().into()); } if rev { - args.push(REV.into()); + args.push(static_val!(REV)); } if let Some((offset, count)) = limit { - args.push(LIMIT.into()); + args.push(static_val!(LIMIT)); args.push(offset.into()); args.push(count.into()); } if withscores { - args.push(WITH_SCORES.into()); + args.push(static_val!(WITH_SCORES)); } Ok((RedisCommandKind::Zrange, args)) @@ -525,7 +529,7 @@ where args.push(max.into_value()?); if let Some((offset, count)) = limit { - args.push(LIMIT.into()); + args.push(static_val!(LIMIT)); args.push(offset.into()); args.push(count.into()); } @@ -557,7 +561,7 @@ where args.push(min.into_value()?); if let Some((offset, count)) = limit { - args.push(LIMIT.into()); + args.push(static_val!(LIMIT)); args.push(offset.into()); args.push(count.into()); } @@ -588,10 +592,10 @@ where args.push(max.into_value()?); if withscores { - args.push(WITH_SCORES.into()); + args.push(static_val!(WITH_SCORES)); } if let Some((offset, count)) = limit { - args.push(LIMIT.into()); + args.push(static_val!(LIMIT)); args.push(offset.into()); args.push(count.into()); } @@ -622,10 +626,10 @@ where args.push(min.into_value()?); if withscores { - args.push(WITH_SCORES.into()); + args.push(static_val!(WITH_SCORES)); } if let Some((offset, count)) = limit { - args.push(LIMIT.into()); + args.push(static_val!(LIMIT)); args.push(offset.into()); args.push(count.into()); } @@ 
-741,7 +745,7 @@ where args.push(stop); if withscores { - args.push(WITH_SCORES.into()); + args.push(static_val!(WITH_SCORES)); } Ok((RedisCommandKind::Zrevrange, args)) @@ -786,18 +790,18 @@ where args.push(key.into()); } if weights.len() > 0 { - args.push(WEIGHTS.into()); + args.push(static_val!(WEIGHTS)); for weight in weights.inner().into_iter() { args.push(weight.try_into()?); } } if let Some(aggregate) = aggregate { - args.push(AGGREGATE.into()); + args.push(static_val!(AGGREGATE)); args.push(aggregate.to_str().into()); } if withscores { - args.push(WITH_SCORES.into()); + args.push(static_val!(WITH_SCORES)); } Ok((RedisCommandKind::Zunion, args)) @@ -830,14 +834,14 @@ where args.push(key.into()); } if weights.len() > 0 { - args.push(WEIGHTS.into()); + args.push(static_val!(WEIGHTS)); for weight in weights.inner().into_iter() { args.push(weight.try_into()?); } } if let Some(aggregate) = aggregate { - args.push(AGGREGATE.into()); + args.push(static_val!(AGGREGATE)); args.push(aggregate.to_str().into()); } diff --git a/src/commands/impls/streams.rs b/src/commands/impls/streams.rs new file mode 100644 index 00000000..4c01c9b6 --- /dev/null +++ b/src/commands/impls/streams.rs @@ -0,0 +1,499 @@ +use super::*; +use crate::error::RedisError; +use crate::modules::inner::RedisClientInner; +use crate::protocol::types::RedisCommandKind; +use crate::protocol::utils as protocol_utils; +use crate::types::{ + MultipleIDs, MultipleKeys, MultipleOrderedPairs, MultipleStrings, RedisKey, RedisValue, XCap, XPendingArgs, XID, +}; +use crate::utils; +use bytes_utils::Str; +use redis_protocol::redis_keyslot; +use std::convert::TryInto; +use std::sync::Arc; + +fn encode_cap(args: &mut Vec, cap: XCap) { + if let Some((kind, trim, threshold, limit)) = cap.into_parts() { + args.push(kind.to_str().into()); + args.push(trim.to_str().into()); + args.push(threshold.into_arg()); + if let Some(count) = limit { + args.push(static_val!(LIMIT)); + args.push(count.into()); + } + } +} + +pub async 
fn xinfo_consumers( + inner: &Arc, + key: RedisKey, + groupname: Str, +) -> Result { + let frame = utils::request_response(inner, move || { + let args = vec![key.into(), groupname.into()]; + Ok((RedisCommandKind::XinfoConsumers, args)) + }) + .await?; + + protocol_utils::frame_to_results_raw(frame) +} + +pub async fn xinfo_groups(inner: &Arc, key: RedisKey) -> Result { + let frame = utils::request_response(inner, move || Ok((RedisCommandKind::XinfoGroups, vec![key.into()]))).await?; + protocol_utils::frame_to_results_raw(frame) +} + +pub async fn xinfo_stream( + inner: &Arc, + key: RedisKey, + full: bool, + count: Option, +) -> Result { + let frame = utils::request_response(inner, move || { + let mut args = Vec::with_capacity(4); + args.push(key.into()); + + if full { + args.push(static_val!(FULL)); + if let Some(count) = count { + args.push(static_val!(COUNT)); + args.push(count.try_into()?); + } + } + + Ok((RedisCommandKind::XinfoStream, args)) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn xadd( + inner: &Arc, + key: RedisKey, + nomkstream: bool, + cap: XCap, + id: XID, + fields: MultipleOrderedPairs, +) -> Result { + let frame = utils::request_response(inner, move || { + let mut args = Vec::with_capacity(8 + (fields.len() * 2)); + args.push(key.into()); + + if nomkstream { + args.push(static_val!(NOMKSTREAM)); + } + encode_cap(&mut args, cap); + + args.push(id.into_str().into()); + for (key, value) in fields.inner().into_iter() { + args.push(key.into()); + args.push(value); + } + + Ok((RedisCommandKind::Xadd, args)) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn xtrim(inner: &Arc, key: RedisKey, cap: XCap) -> Result { + let frame = utils::request_response(inner, move || { + let mut args = Vec::with_capacity(6); + args.push(key.into()); + encode_cap(&mut args, cap); + + Ok((RedisCommandKind::Xtrim, args)) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn xdel( + inner: 
&Arc, + key: RedisKey, + ids: MultipleStrings, +) -> Result { + let frame = utils::request_response(inner, move || { + let mut args = Vec::with_capacity(1 + ids.len()); + args.push(key.into()); + + for id in ids.inner().into_iter() { + args.push(id.into()); + } + Ok((RedisCommandKind::Xdel, args)) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn xrange( + inner: &Arc, + key: RedisKey, + start: RedisValue, + end: RedisValue, + count: Option, +) -> Result { + let frame = utils::request_response(inner, move || { + let mut args = Vec::with_capacity(5); + args.push(key.into()); + args.push(start); + args.push(end); + + if let Some(count) = count { + args.push(static_val!(COUNT)); + args.push(count.try_into()?); + } + + Ok((RedisCommandKind::Xrange, args)) + }) + .await?; + + protocol_utils::frame_to_results_raw(frame) +} + +pub async fn xrevrange( + inner: &Arc, + key: RedisKey, + end: RedisValue, + start: RedisValue, + count: Option, +) -> Result { + let frame = utils::request_response(inner, move || { + let mut args = Vec::with_capacity(5); + args.push(key.into()); + args.push(end); + args.push(start); + + if let Some(count) = count { + args.push(static_val!(COUNT)); + args.push(count.try_into()?); + } + + Ok((RedisCommandKind::Xrevrange, args)) + }) + .await?; + + protocol_utils::frame_to_results_raw(frame) +} + +pub async fn xlen(inner: &Arc, key: RedisKey) -> Result { + one_arg_value_cmd(inner, RedisCommandKind::Xlen, key.into()).await +} + +pub async fn xread( + inner: &Arc, + count: Option, + block: Option, + keys: MultipleKeys, + ids: MultipleIDs, +) -> Result { + let is_clustered = utils::is_clustered(&inner.config); + let frame = utils::request_response(inner, move || { + let is_blocking = block.is_some(); + let mut hash_slot = None; + let mut args = Vec::with_capacity(5 + keys.len() + ids.len()); + + if let Some(count) = count { + args.push(static_val!(COUNT)); + args.push(count.try_into()?); + } + if let Some(block) = block { + 
args.push(static_val!(BLOCK)); + args.push(block.try_into()?); + } + + args.push(static_val!(STREAMS)); + for (idx, key) in keys.inner().into_iter().enumerate() { + // set the hash slot from the first key. if any other keys are on other cluster nodes the server will say something + if is_clustered && idx == 0 { + hash_slot = Some(redis_keyslot(key.as_bytes())); + } + + args.push(key.into()); + } + for id in ids.inner().into_iter() { + args.push(id.into_str().into()); + } + + Ok((RedisCommandKind::Xread((is_blocking, hash_slot)), args)) + }) + .await?; + + protocol_utils::frame_to_results_raw(frame) +} + +pub async fn xgroup_create( + inner: &Arc, + key: RedisKey, + groupname: Str, + id: XID, + mkstream: bool, +) -> Result { + let frame = utils::request_response(inner, move || { + let mut args = Vec::with_capacity(4); + args.push(key.into()); + args.push(groupname.into()); + args.push(id.into_str().into()); + if mkstream { + args.push(static_val!(MKSTREAM)); + } + + Ok((RedisCommandKind::Xgroupcreate, args)) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn xgroup_createconsumer( + inner: &Arc, + key: RedisKey, + groupname: Str, + consumername: Str, +) -> Result { + let frame = utils::request_response(inner, move || { + Ok(( + RedisCommandKind::XgroupCreateConsumer, + vec![key.into(), groupname.into(), consumername.into()], + )) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn xgroup_delconsumer( + inner: &Arc, + key: RedisKey, + groupname: Str, + consumername: Str, +) -> Result { + let frame = utils::request_response(inner, move || { + Ok(( + RedisCommandKind::XgroupDelConsumer, + vec![key.into(), groupname.into(), consumername.into()], + )) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn xgroup_destroy( + inner: &Arc, + key: RedisKey, + groupname: Str, +) -> Result { + let frame = utils::request_response(inner, move || { + Ok((RedisCommandKind::XgroupDestroy, vec![key.into(), 
groupname.into()])) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn xgroup_setid( + inner: &Arc, + key: RedisKey, + groupname: Str, + id: XID, +) -> Result { + let frame = utils::request_response(inner, move || { + Ok(( + RedisCommandKind::XgroupSetId, + vec![key.into(), groupname.into(), id.into_str().into()], + )) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn xreadgroup( + inner: &Arc, + group: Str, + consumer: Str, + count: Option, + block: Option, + noack: bool, + keys: MultipleKeys, + ids: MultipleIDs, +) -> Result { + let is_clustered = utils::is_clustered(&inner.config); + let frame = utils::request_response(inner, move || { + let is_blocking = block.is_some(); + let mut hash_slot = None; + + let mut args = Vec::with_capacity(9 + keys.len() + ids.len()); + args.push(static_val!(GROUP)); + args.push(group.into()); + args.push(consumer.into()); + + if let Some(count) = count { + args.push(static_val!(COUNT)); + args.push(count.try_into()?); + } + if let Some(block) = block { + args.push(static_val!(BLOCK)); + args.push(block.try_into()?); + } + if noack { + args.push(static_val!(NOACK)); + } + + args.push(static_val!(STREAMS)); + for (idx, key) in keys.inner().into_iter().enumerate() { + if is_clustered && idx == 0 { + hash_slot = Some(redis_keyslot(key.as_bytes())); + } + + args.push(key.into()); + } + for id in ids.inner().into_iter() { + args.push(id.into_str().into()); + } + + Ok((RedisCommandKind::Xreadgroup((is_blocking, hash_slot)), args)) + }) + .await?; + + protocol_utils::frame_to_results_raw(frame) +} + +pub async fn xack( + inner: &Arc, + key: RedisKey, + group: Str, + ids: MultipleIDs, +) -> Result { + let frame = utils::request_response(inner, move || { + let mut args = Vec::with_capacity(2 + ids.len()); + args.push(key.into()); + args.push(group.into()); + + for id in ids.inner().into_iter() { + args.push(id.into_str().into()); + } + Ok((RedisCommandKind::Xack, args)) + }) + 
.await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn xclaim( + inner: &Arc, + key: RedisKey, + group: Str, + consumer: Str, + min_idle_time: u64, + ids: MultipleIDs, + idle: Option, + time: Option, + retry_count: Option, + force: bool, + justid: bool, +) -> Result { + let frame = utils::request_response(inner, move || { + let mut args = Vec::with_capacity(12 + ids.len()); + args.push(key.into()); + args.push(group.into()); + args.push(consumer.into()); + args.push(min_idle_time.try_into()?); + + for id in ids.inner().into_iter() { + args.push(id.into_str().into()); + } + if let Some(idle) = idle { + args.push(static_val!(IDLE)); + args.push(idle.try_into()?); + } + if let Some(time) = time { + args.push(static_val!(TIME)); + args.push(time.try_into()?); + } + if let Some(retry_count) = retry_count { + args.push(static_val!(RETRYCOUNT)); + args.push(retry_count.try_into()?); + } + if force { + args.push(static_val!(FORCE)); + } + if justid { + args.push(static_val!(JUSTID)); + } + + Ok((RedisCommandKind::Xclaim, args)) + }) + .await?; + + protocol_utils::frame_to_results_raw(frame) +} + +pub async fn xautoclaim( + inner: &Arc, + key: RedisKey, + group: Str, + consumer: Str, + min_idle_time: u64, + start: XID, + count: Option, + justid: bool, +) -> Result { + let frame = utils::request_response(inner, move || { + let mut args = Vec::with_capacity(8); + args.push(key.into()); + args.push(group.into()); + args.push(consumer.into()); + args.push(min_idle_time.try_into()?); + args.push(start.into_str().into()); + + if let Some(count) = count { + args.push(static_val!(COUNT)); + args.push(count.try_into()?); + } + if justid { + args.push(static_val!(JUSTID)); + } + + Ok((RedisCommandKind::Xautoclaim, args)) + }) + .await?; + + protocol_utils::frame_to_results_raw(frame) +} + +pub async fn xpending( + inner: &Arc, + key: RedisKey, + group: Str, + cmd_args: XPendingArgs, +) -> Result { + let frame = utils::request_response(inner, move || { + let mut args 
= Vec::with_capacity(8); + args.push(key.into()); + args.push(group.into()); + + if let Some((idle, start, end, count, consumer)) = cmd_args.into_parts()? { + if let Some(idle) = idle { + args.push(static_val!(IDLE)); + args.push(idle.try_into()?); + } + args.push(start.into_str().into()); + args.push(end.into_str().into()); + args.push(count.try_into()?); + if let Some(consumer) = consumer { + args.push(consumer.into()); + } + } + + Ok((RedisCommandKind::Xpending, args)) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} diff --git a/src/commands/strings.rs b/src/commands/impls/strings.rs similarity index 100% rename from src/commands/strings.rs rename to src/commands/impls/strings.rs diff --git a/src/commands/interfaces/acl.rs b/src/commands/interfaces/acl.rs new file mode 100644 index 00000000..9a7823d2 --- /dev/null +++ b/src/commands/interfaces/acl.rs @@ -0,0 +1,137 @@ +use crate::commands; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{AclRule, AclUser, FromRedis, MultipleStrings, RedisValue}; +use crate::utils; +use bytes_utils::Str; + +/// Functions that implement the [ACL](https://redis.io/commands#server) interface. +pub trait AclInterface: ClientLike + Sized { + /// Create an ACL user with the specified rules or modify the rules of an existing user. + /// + /// + fn acl_setuser(&self, username: S, rules: Vec) -> AsyncResult<()> + where + S: Into, + { + into!(username); + async_spawn(self, |inner| async move { + commands::acl::acl_setuser(&inner, username, rules).await + }) + } + + /// When Redis is configured to use an ACL file (with the aclfile configuration option), this command will reload the + /// ACLs from the file, replacing all the current ACL rules with the ones defined in the file. 
+ /// + /// + fn acl_load(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::acl::acl_load(&inner).await + }) + } + + /// When Redis is configured to use an ACL file (with the aclfile configuration option), this command will save the + /// currently defined ACLs from the server memory to the ACL file. + /// + /// + fn acl_save(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { commands::acl::acl_save(&inner).await }) + } + + /// The command shows the currently active ACL rules in the Redis server. + /// + /// + fn acl_list(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn( + self, + |inner| async move { commands::acl::acl_list(&inner).await?.convert() }, + ) + } + + /// The command shows a list of all the usernames of the currently configured users in the Redis ACL system. + /// + /// + fn acl_users(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::acl::acl_users(&inner).await?.convert() + }) + } + + /// The command returns all the rules defined for an existing ACL user. + /// + /// + fn acl_getuser(&self, username: S) -> AsyncResult> + where + S: Into, + { + into!(username); + async_spawn(self, |inner| async move { + commands::acl::acl_getuser(&inner, username).await + }) + } + + /// Delete all the specified ACL users and terminate all the connections that are authenticated with such users. + /// + /// + fn acl_deluser(&self, usernames: S) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + { + into!(usernames); + async_spawn(self, |inner| async move { + commands::acl::acl_deluser(&inner, usernames).await?.convert() + }) + } + + /// The command shows the available ACL categories if called without arguments. If a category name is given, + /// the command shows all the Redis commands in the specified category. 
+ /// + /// + fn acl_cat(&self, category: Option) -> AsyncResult> { + async_spawn(self, |inner| async move { + commands::acl::acl_cat(&inner, category).await?.convert() + }) + } + + /// Generate a password with length `bits`, returning the password. + /// + /// + fn acl_genpass(&self, bits: Option) -> AsyncResult { + async_spawn(self, |inner| async move { + commands::acl::acl_genpass(&inner, bits).await?.convert() + }) + } + + /// Return the username the current connection is authenticated with. New connections are authenticated + /// with the "default" user. + /// + /// + fn acl_whoami(&self) -> AsyncResult { + async_spawn(self, |inner| async move { + commands::acl::acl_whoami(&inner).await?.convert() + }) + } + + /// Read `count` recent ACL security events. + /// + /// + fn acl_log_count(&self, count: Option) -> AsyncResult { + async_spawn(self, |inner| async move { + commands::acl::acl_log_count(&inner, count).await + }) + } + + /// Clear the ACL security events logs. + /// + /// + fn acl_log_reset(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { commands::acl::acl_log_reset(&inner).await }) + } +} diff --git a/src/commands/interfaces/client.rs b/src/commands/interfaces/client.rs new file mode 100644 index 00000000..fa1d39f0 --- /dev/null +++ b/src/commands/interfaces/client.rs @@ -0,0 +1,164 @@ +use crate::commands; +use crate::error::{RedisError, RedisErrorKind}; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{ + ClientKillFilter, ClientKillType, ClientPauseKind, ClientReplyFlag, ClientUnblockFlag, FromRedis, RedisValue, +}; +use crate::utils; +use bytes_utils::Str; +use std::collections::HashMap; +use std::sync::Arc; + +/// Functions that implement the [CLIENT](https://redis.io/commands#connection) interface. +pub trait ClientInterface: ClientLike + Sized { + /// Return the ID of the current connection. + /// + /// Note: Against a clustered deployment this will return the ID of a random connection. 
See [connection_ids](Self::connection_ids) for more information. + /// + /// + fn client_id(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::client::client_id(&inner).await?.convert() + }) + } + + /// Read the connection IDs for the active connections to each server. + /// + /// The returned map contains each server's `host:port` and the result of calling `CLIENT ID` on the connection. + /// + /// Note: despite being async this function will usually return cached information from the client if possible. + fn connection_ids(&self) -> AsyncResult, i64>> { + async_spawn(self, |inner| async move { + utils::read_connection_ids(&inner).await.ok_or(RedisError::new( + RedisErrorKind::Unknown, + "Failed to read connection IDs", + )) + }) + } + + /// Force update the client's sentinel nodes list if using the sentinel interface. + /// + /// The client will automatically update this when connections to the primary server close. + fn update_sentinel_nodes(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { utils::update_sentinel_nodes(&inner).await }) + } + + /// The command returns information and statistics about the current client connection in a mostly human readable format. + /// + /// + fn client_info(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::client::client_info(&inner).await?.convert() + }) + } + + /// Close a given connection or set of connections. + /// + /// + fn client_kill(&self, filters: Vec) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::client::client_kill(&inner, filters).await?.convert() + }) + } + + /// The CLIENT LIST command returns information and statistics about the client connections server in a mostly human readable format. 
+ /// + /// + fn client_list(&self, r#type: Option, ids: Option>) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::client::client_list(&inner, r#type, ids).await?.convert() + }) + } + + /// The CLIENT GETNAME returns the name of the current connection as set by CLIENT SETNAME. + /// + /// + fn client_getname(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::client::client_getname(&inner).await?.convert() + }) + } + + /// Assign a name to the current connection. + /// + /// **Note: The client automatically generates a unique name for each client that is shared by all underlying connections. + /// Use `self.id() to read the automatically generated name.** + /// + /// + fn client_setname(&self, name: S) -> AsyncResult<()> + where + S: Into, + { + into!(name); + async_spawn(self, |inner| async move { + commands::client::client_setname(&inner, name).await + }) + } + + /// CLIENT PAUSE is a connections control command able to suspend all the Redis clients for the specified amount of time (in milliseconds). + /// + /// + fn client_pause(&self, timeout: i64, mode: Option) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + commands::client::client_pause(&inner, timeout, mode).await + }) + } + + /// CLIENT UNPAUSE is used to resume command processing for all clients that were paused by CLIENT PAUSE. + /// + /// + fn client_unpause(&self) -> AsyncResult<()> { + async_spawn( + self, + |inner| async move { commands::client::client_unpause(&inner).await }, + ) + } + + /// The CLIENT REPLY command controls whether the server will reply the client's commands. 
The following modes are available: + /// + /// + fn client_reply(&self, flag: ClientReplyFlag) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + commands::client::client_reply(&inner, flag).await + }) + } + + /// This command can unblock, from a different connection, a client blocked in a blocking operation, such as for instance BRPOP or XREAD or WAIT. + /// + /// Note: this command is sent on a backchannel connection and will work even when the main connection is blocked. + /// + /// + fn client_unblock(&self, id: S, flag: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + { + into!(id); + async_spawn(self, |inner| async move { + commands::client::client_unblock(&inner, id, flag).await?.convert() + }) + } + + /// A convenience function to unblock any blocked connection on this client. + fn unblock_self(&self, flag: Option) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::client::unblock_self(&inner, flag).await + }) + } +} diff --git a/src/commands/interfaces/cluster.rs b/src/commands/interfaces/cluster.rs new file mode 100644 index 00000000..bed1d0f9 --- /dev/null +++ b/src/commands/interfaces/cluster.rs @@ -0,0 +1,282 @@ +use crate::commands; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{ + ClusterFailoverFlag, ClusterInfo, ClusterKeyCache, ClusterResetFlag, ClusterSetSlotState, FromRedis, + MultipleHashSlots, RedisKey, RedisValue, +}; +use crate::utils; +use bytes_utils::Str; + +/// Functions that implement the [CLUSTER](https://redis.io/commands#cluster) interface. +pub trait ClusterInterface: ClientLike + Sized { + /// Whether or not the client is using a clustered Redis deployment. + fn is_clustered(&self) -> bool { + utils::is_clustered(&self.inner().config) + } + + /// Read the cached state of the cluster used for routing commands to the correct cluster nodes. 
+ fn cached_cluster_state(&self) -> Option { + self.inner().cluster_state.read().clone() + } + + /// Advances the cluster config epoch. + /// + /// + fn cluster_bumpepoch(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::cluster::cluster_bumpepoch(&inner).await?.convert() + }) + } + + /// Deletes all slots from a node. + /// + /// + fn cluster_flushslots(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_flushslots(&inner).await + }) + } + + /// Returns the node's id. + /// + /// + fn cluster_myid(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::cluster::cluster_myid(&inner).await?.convert() + }) + } + + /// Read the current cluster node configuration. + /// + /// Note: The client keeps a cached, parsed version of the cluster state in memory available at [cached_cluster_state](Self::cached_cluster_state). + /// + /// + fn cluster_nodes(&self) -> AsyncResult { + async_spawn(self, |inner| async move { + commands::cluster::cluster_nodes(&inner).await?.convert() + }) + } + + /// Forces a node to save the nodes.conf configuration on disk. + /// + /// + fn cluster_saveconfig(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_saveconfig(&inner).await + }) + } + + /// CLUSTER SLOTS returns details about which cluster slots map to which Redis instances. + /// + /// + fn cluster_slots(&self) -> AsyncResult { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_slots(&inner).await + }) + } + + /// CLUSTER INFO provides INFO style information about Redis Cluster vital parameters. 
+ /// + /// + fn cluster_info(&self) -> AsyncResult { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_info(&inner).await + }) + } + + /// This command is useful in order to modify a node's view of the cluster configuration. Specifically it assigns a set of hash slots to the node receiving the command. + /// + /// + fn cluster_add_slots(&self, slots: S) -> AsyncResult<()> + where + S: Into, + { + into!(slots); + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_add_slots(&inner, slots).await + }) + } + + /// The command returns the number of failure reports for the specified node. + /// + /// + fn cluster_count_failure_reports(&self, node_id: S) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + { + into!(node_id); + async_spawn(self, |inner| async move { + commands::cluster::cluster_count_failure_reports(&inner, node_id) + .await? + .convert() + }) + } + + /// Returns the number of keys in the specified Redis Cluster hash slot. + /// + /// + fn cluster_count_keys_in_slot(&self, slot: u16) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::cluster::cluster_count_keys_in_slot(&inner, slot) + .await? + .convert() + }) + } + + /// The CLUSTER DELSLOTS command asks a particular Redis Cluster node to forget which master is serving the hash slots specified as arguments. + /// + /// + fn cluster_del_slots(&self, slots: S) -> AsyncResult<()> + where + S: Into, + { + into!(slots); + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_del_slots(&inner, slots).await + }) + } + + /// This command, that can only be sent to a Redis Cluster replica node, forces the replica to start a manual failover of its master instance. 
+ /// + /// + fn cluster_failover(&self, flag: Option) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_failover(&inner, flag).await + }) + } + + /// The command is used in order to remove a node, specified via its node ID, from the set of known nodes of the Redis Cluster node receiving the command. + /// In other words the specified node is removed from the nodes table of the node receiving the command. + /// + /// + fn cluster_forget(&self, node_id: S) -> AsyncResult<()> + where + S: Into, + { + into!(node_id); + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_forget(&inner, node_id).await + }) + } + + /// The command returns an array of keys names stored in the contacted node and hashing to the specified hash slot. + /// + /// + fn cluster_get_keys_in_slot(&self, slot: u16, count: u64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_get_keys_in_slot(&inner, slot, count) + .await? + .convert() + }) + } + + /// Returns an integer identifying the hash slot the specified key hashes to. + /// + /// + fn cluster_keyslot(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::cluster::cluster_keyslot(&inner, key).await?.convert() + }) + } + + /// CLUSTER MEET is used in order to connect different Redis nodes with cluster support enabled, into a working cluster. + /// + /// + fn cluster_meet(&self, ip: S, port: u16) -> AsyncResult<()> + where + S: Into, + { + into!(ip); + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_meet(&inner, ip, port).await + }) + } + + /// The command reconfigures a node as a replica of the specified master. 
If the node receiving the command is an empty master, as + /// a side effect of the command, the node role is changed from master to replica. + /// + /// + fn cluster_replicate(&self, node_id: S) -> AsyncResult<()> + where + S: Into, + { + into!(node_id); + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_replicate(&inner, node_id).await + }) + } + + /// The command provides a list of replica nodes replicating from the specified master node. + /// + /// + fn cluster_replicas(&self, node_id: S) -> AsyncResult + where + S: Into, + { + into!(node_id); + async_spawn(self, |inner| async move { + commands::cluster::cluster_replicas(&inner, node_id).await?.convert() + }) + } + + /// Reset a Redis Cluster node, in a more or less drastic way depending on the reset type, that can be hard or soft. Note that + /// this command does not work for masters if they hold one or more keys, in that case to completely reset a master node keys + /// must be removed first, e.g. by using FLUSHALL first, and then CLUSTER RESET. + /// + /// + fn cluster_reset(&self, mode: Option) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_reset(&inner, mode).await + }) + } + + /// This command sets a specific config epoch in a fresh node. + /// + /// + fn cluster_set_config_epoch(&self, epoch: u64) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_set_config_epoch(&inner, epoch).await + }) + } + + /// CLUSTER SETSLOT is responsible of changing the state of a hash slot in the receiving node in different ways. 
+ /// + /// + fn cluster_setslot(&self, slot: u16, state: ClusterSetSlotState) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::cluster::cluster_setslot(&inner, slot, state).await + }) + } +} diff --git a/src/commands/interfaces/config.rs b/src/commands/interfaces/config.rs new file mode 100644 index 00000000..e563b993 --- /dev/null +++ b/src/commands/interfaces/config.rs @@ -0,0 +1,64 @@ +use crate::commands; +use crate::error::RedisError; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{FromRedis, RedisValue}; +use crate::utils; +use bytes_utils::Str; +use std::convert::TryInto; + +/// Functions that implement the [CONFIG](https://redis.io/commands#server) interface. +pub trait ConfigInterface: ClientLike + Sized { + /// Resets the statistics reported by Redis using the INFO command. + /// + /// + fn config_resetstat(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::config::config_resetstat(&inner).await + }) + } + + /// The CONFIG REWRITE command rewrites the redis.conf file the server was started with, applying the minimal changes needed to make it + /// reflect the configuration currently used by the server, which may be different compared to the original one because of the use of + /// the CONFIG SET command. + /// + /// + fn config_rewrite(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::config::config_rewrite(&inner).await + }) + } + + /// The CONFIG GET command is used to read the configuration parameters of a running Redis server. 
+ /// + /// + fn config_get(&self, parameter: S) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + { + into!(parameter); + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::config::config_get(&inner, parameter).await?.convert() + }) + } + + /// The CONFIG SET command is used in order to reconfigure the server at run time without the need to restart Redis. + /// + /// + fn config_set(&self, parameter: P, value: V) -> AsyncResult<()> + where + P: Into, + V: TryInto, + V::Error: Into, + { + into!(parameter); + try_into!(value); + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::config::config_set(&inner, parameter, value).await + }) + } +} diff --git a/src/commands/interfaces/geo.rs b/src/commands/interfaces/geo.rs new file mode 100644 index 00000000..f65fcf00 --- /dev/null +++ b/src/commands/interfaces/geo.rs @@ -0,0 +1,234 @@ +use crate::commands; +use crate::error::RedisError; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{ + Any, FromRedis, GeoPosition, GeoRadiusInfo, GeoUnit, MultipleGeoValues, MultipleValues, RedisKey, RedisValue, + SetOptions, SortOrder, +}; +use std::convert::TryInto; + +/// Functions that implement the [GEO](https://redis.io/commands#geo) interface. +pub trait GeoInterface: ClientLike + Sized { + /// Adds the specified geospatial items (longitude, latitude, name) to the specified key. + /// + /// + fn geoadd(&self, key: K, options: Option, changed: bool, values: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: Into, + { + into!(key, values); + async_spawn(self, move |inner| async move { + commands::geo::geoadd(&inner, key, options, changed, values) + .await? + .convert() + }) + } + + /// Return valid Geohash strings representing the position of one or more elements in a sorted set value representing a geospatial index (where elements were added using GEOADD). 
+ /// + /// + fn geohash(&self, key: K, members: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(members); + async_spawn(self, |inner| async move { + commands::geo::geohash(&inner, key, members).await?.convert() + }) + } + + /// Return the positions (longitude,latitude) of all the specified members of the geospatial index represented by the sorted set at key. + /// + /// Callers can use [as_geo_position](crate::types::RedisValue::as_geo_position) to lazily parse results as needed. + /// + /// + fn geopos(&self, key: K, members: V) -> AsyncResult + where + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(members); + async_spawn(self, |inner| async move { + commands::geo::geopos(&inner, key, members).await + }) + } + + /// Return the distance between two members in the geospatial index represented by the sorted set. + /// + /// + fn geodist(&self, key: K, src: S, dest: D, unit: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + S: TryInto, + S::Error: Into, + D: TryInto, + D::Error: Into, + { + into!(key); + try_into!(src, dest); + async_spawn(self, |inner| async move { + commands::geo::geodist(&inner, key, src, dest, unit).await?.convert() + }) + } + + /// Return the members of a sorted set populated with geospatial information using GEOADD, which are within the borders of the area specified with + /// the center location and the maximum distance from the center (the radius). 
+ /// + /// + fn georadius( + &self, + key: K, + position: P, + radius: f64, + unit: GeoUnit, + withcoord: bool, + withdist: bool, + withhash: bool, + count: Option<(u64, Any)>, + ord: Option, + store: Option, + storedist: Option, + ) -> AsyncResult> + where + K: Into, + P: Into, + { + into!(key, position); + async_spawn(self, |inner| async move { + commands::geo::georadius( + &inner, key, position, radius, unit, withcoord, withdist, withhash, count, ord, store, storedist, + ) + .await + }) + } + + /// This command is exactly like GEORADIUS with the sole difference that instead of taking, as the center of the area to query, a longitude and + /// latitude value, it takes the name of a member already existing inside the geospatial index represented by the sorted set. + /// + /// + fn georadiusbymember( + &self, + key: K, + member: V, + radius: f64, + unit: GeoUnit, + withcoord: bool, + withdist: bool, + withhash: bool, + count: Option<(u64, Any)>, + ord: Option, + store: Option, + storedist: Option, + ) -> AsyncResult> + where + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(member); + async_spawn(self, |inner| async move { + commands::geo::georadiusbymember( + &inner, + key, + to!(member)?, + radius, + unit, + withcoord, + withdist, + withhash, + count, + ord, + store, + storedist, + ) + .await + }) + } + + /// Return the members of a sorted set populated with geospatial information using GEOADD, which are within the borders of the area specified by a given shape. 
+ /// + /// + fn geosearch( + &self, + key: K, + from_member: Option, + from_lonlat: Option, + by_radius: Option<(f64, GeoUnit)>, + by_box: Option<(f64, f64, GeoUnit)>, + ord: Option, + count: Option<(u64, Any)>, + withcoord: bool, + withdist: bool, + withhash: bool, + ) -> AsyncResult> + where + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::geo::geosearch( + &inner, + key, + from_member, + from_lonlat, + by_radius, + by_box, + ord, + count, + withcoord, + withdist, + withhash, + ) + .await + }) + } + + /// This command is like GEOSEARCH, but stores the result in destination key. Returns the number of members added to the destination key. + /// + /// + fn geosearchstore( + &self, + dest: D, + source: S, + from_member: Option, + from_lonlat: Option, + by_radius: Option<(f64, GeoUnit)>, + by_box: Option<(f64, f64, GeoUnit)>, + ord: Option, + count: Option<(u64, Any)>, + storedist: bool, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + D: Into, + S: Into, + { + into!(dest, source); + async_spawn(self, |inner| async move { + commands::geo::geosearchstore( + &inner, + dest, + source, + from_member, + from_lonlat, + by_radius, + by_box, + ord, + count, + storedist, + ) + .await? + .convert() + }) + } +} diff --git a/src/commands/interfaces/hashes.rs b/src/commands/interfaces/hashes.rs new file mode 100644 index 00000000..24078a8f --- /dev/null +++ b/src/commands/interfaces/hashes.rs @@ -0,0 +1,241 @@ +use crate::commands; +use crate::error::RedisError; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{FromRedis, MultipleKeys, RedisKey, RedisMap, RedisValue}; +use std::convert::TryInto; + +/// Functions that implement the [Hashes](https://redis.io/commands#hashes) interface. +pub trait HashesInterface: ClientLike + Sized { + /// Returns all fields and values of the hash stored at `key`. 
+ /// + /// + fn hgetall(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::hashes::hgetall(&inner, key).await?.convert() + }) + } + + /// Removes the specified fields from the hash stored at `key`. + /// + /// + fn hdel(&self, key: K, fields: F) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + F: Into, + { + into!(key, fields); + async_spawn(self, |inner| async move { + commands::hashes::hdel(&inner, key, fields).await?.convert() + }) + } + + /// Returns if `field` is an existing field in the hash stored at `key`. + /// + /// + fn hexists(&self, key: K, field: F) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + F: Into, + { + into!(key, field); + async_spawn(self, |inner| async move { + commands::hashes::hexists(&inner, key, field).await?.convert() + }) + } + + /// Returns the value associated with `field` in the hash stored at `key`. + /// + /// + fn hget(&self, key: K, field: F) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + F: Into, + { + into!(key, field); + async_spawn(self, |inner| async move { + commands::hashes::hget(&inner, key, field).await?.convert() + }) + } + + /// Increments the number stored at `field` in the hash stored at `key` by `increment`. + /// + /// + fn hincrby(&self, key: K, field: F, increment: i64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + F: Into, + { + into!(key, field); + async_spawn(self, |inner| async move { + commands::hashes::hincrby(&inner, key, field, increment) + .await? + .convert() + }) + } + + /// Increment the specified `field` of a hash stored at `key`, and representing a floating point number, by the specified `increment`. 
+ /// + /// + fn hincrbyfloat(&self, key: K, field: F, increment: f64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + F: Into, + { + into!(key, field); + async_spawn(self, |inner| async move { + commands::hashes::hincrbyfloat(&inner, key, field, increment) + .await? + .convert() + }) + } + + /// Returns all field names in the hash stored at `key`. + /// + /// + fn hkeys(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::hashes::hkeys(&inner, key).await?.convert() + }) + } + + /// Returns the number of fields contained in the hash stored at `key`. + /// + /// + fn hlen(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::hashes::hlen(&inner, key).await?.convert() + }) + } + + /// Returns the values associated with the specified `fields` in the hash stored at `key`. + /// + /// + fn hmget(&self, key: K, fields: F) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + F: Into, + { + into!(key, fields); + async_spawn(self, |inner| async move { + commands::hashes::hmget(&inner, key, fields).await?.convert() + }) + } + + /// Sets the specified fields to their respective values in the hash stored at `key`. + /// + /// + fn hmset(&self, key: K, values: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(values); + async_spawn(self, |inner| async move { + commands::hashes::hmset(&inner, key, values).await?.convert() + }) + } + + /// Sets fields in the hash stored at `key` to their provided values. 
+ /// + /// + fn hset(&self, key: K, values: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(values); + async_spawn(self, |inner| async move { + commands::hashes::hset(&inner, key, values).await?.convert() + }) + } + + /// Sets `field` in the hash stored at `key` to `value`, only if `field` does not yet exist. + /// + /// + fn hsetnx(&self, key: K, field: F, value: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + F: Into, + V: TryInto, + V::Error: Into, + { + into!(key, field); + try_into!(value); + async_spawn(self, |inner| async move { + commands::hashes::hsetnx(&inner, key, field, value).await?.convert() + }) + } + + /// When called with just the `key` argument, return a random field from the hash value stored at `key`. + /// + /// If the provided `count` argument is positive, return an array of distinct fields. + /// + /// + fn hrandfield(&self, key: K, count: Option<(i64, bool)>) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::hashes::hrandfield(&inner, key, count).await?.convert() + }) + } + + /// Returns the string length of the value associated with `field` in the hash stored at `key`. + /// + /// + fn hstrlen(&self, key: K, field: F) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + F: Into, + { + into!(key, field); + async_spawn(self, |inner| async move { + commands::hashes::hstrlen(&inner, key, field).await?.convert() + }) + } + + /// Returns all values in the hash stored at `key`. 
+ /// + /// + fn hvals(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::hashes::hvals(&inner, key).await?.convert() + }) + } +} diff --git a/src/commands/interfaces/hyperloglog.rs b/src/commands/interfaces/hyperloglog.rs new file mode 100644 index 00000000..8f0b604c --- /dev/null +++ b/src/commands/interfaces/hyperloglog.rs @@ -0,0 +1,59 @@ +use crate::commands; +use crate::error::RedisError; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{FromRedis, MultipleKeys, MultipleValues, RedisKey}; +use std::convert::TryInto; + +/// Functions that implement the [HyperLogLog](https://redis.io/commands#hyperloglog) interface. +pub trait HyperloglogInterface: ClientLike + Sized { + /// Adds all the element arguments to the HyperLogLog data structure stored at the variable name specified as first argument. + /// + /// + fn pfadd(&self, key: K, elements: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(elements); + async_spawn(self, |inner| async move { + commands::hyperloglog::pfadd(&inner, key, elements).await?.convert() + }) + } + + /// When called with a single key, returns the approximated cardinality computed by the HyperLogLog data structure stored at + /// the specified variable, which is 0 if the variable does not exist. + /// + /// When called with multiple keys, returns the approximated cardinality of the union of the HyperLogLogs passed, by + /// internally merging the HyperLogLogs stored at the provided keys into a temporary HyperLogLog. 
+ /// + /// + fn pfcount(&self, keys: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + commands::hyperloglog::pfcount(&inner, keys).await?.convert() + }) + } + + /// Merge multiple HyperLogLog values into an unique value that will approximate the cardinality of the union of the observed + /// sets of the source HyperLogLog structures. + /// + /// + fn pfmerge(&self, dest: D, sources: S) -> AsyncResult + where + R: FromRedis + Unpin + Send, + D: Into, + S: Into, + { + into!(dest, sources); + async_spawn(self, |inner| async move { + commands::hyperloglog::pfmerge(&inner, dest, sources).await?.convert() + }) + } +} diff --git a/src/commands/interfaces/keys.rs b/src/commands/interfaces/keys.rs new file mode 100644 index 00000000..7efdcb06 --- /dev/null +++ b/src/commands/interfaces/keys.rs @@ -0,0 +1,422 @@ +use crate::commands; +use crate::error::RedisError; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{Expiration, FromRedis, MultipleKeys, RedisKey, RedisMap, RedisValue, SetOptions}; +use std::convert::TryInto; + +/// Functions that implement the generic [keys](https://redis.io/commands#generic) interface. +pub trait KeysInterface: ClientLike + Sized { + /// Return a random key from the currently selected database. + /// + /// + fn randomkey(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::keys::randomkey(&inner).await?.convert() + }) + } + + /// This command copies the value stored at the source key to the destination key. + /// + /// + fn copy(&self, source: S, destination: D, db: Option, replace: bool) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + D: Into, + { + into!(source, destination); + async_spawn(self, |inner| async move { + commands::keys::copy(&inner, source, destination, db, replace) + .await? 
+ .convert() + }) + } + + /// Serialize the value stored at `key` in a Redis-specific format and return it as bulk string. + /// + /// + fn dump(&self, key: K) -> AsyncResult + where + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { commands::keys::dump(&inner, key).await }) + } + + /// Create a key associated with a value that is obtained by deserializing the provided serialized value + /// + /// + fn restore( + &self, + key: K, + ttl: i64, + serialized: RedisValue, + replace: bool, + absttl: bool, + idletime: Option, + frequency: Option, + ) -> AsyncResult + where + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::restore(&inner, key, ttl, serialized, replace, absttl, idletime, frequency).await + }) + } + + /// Set a value with optional NX|XX, EX|PX|EXAT|PXAT|KEEPTTL, and GET arguments. + /// + /// Note: the `get` flag was added in 6.2.0. + /// + /// + fn set( + &self, + key: K, + value: V, + expire: Option, + options: Option, + get: bool, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(value); + async_spawn(self, |inner| async move { + commands::keys::set(&inner, key, value, expire, options, get) + .await? + .convert() + }) + } + + /// Read a value from the server. + /// + /// + fn get(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::get(&inner, key).await?.convert() + }) + } + + /// Returns the substring of the string value stored at `key` with offsets `start` and `end` (both inclusive). + /// + /// Note: Command formerly called SUBSTR in Redis verison <=2.0. 
+ /// + /// + fn getrange(&self, key: K, start: usize, end: usize) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::getrange(&inner, key, start, end).await?.convert() + }) + } + + /// Overwrites part of the string stored at `key`, starting at the specified `offset`, for the entire length of `value`. + /// + /// + fn setrange(&self, key: K, offset: u32, value: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(value); + async_spawn(self, |inner| async move { + commands::keys::setrange(&inner, key, offset, value).await?.convert() + }) + } + + /// Atomically sets `key` to `value` and returns the old value stored at `key`. + /// + /// Returns an error if `key` does not hold string value. Returns nil if `key` does not exist. + /// + /// + fn getset(&self, key: K, value: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(value); + async_spawn(self, |inner| async move { + commands::keys::getset(&inner, key, value).await?.convert() + }) + } + + /// Get the value of key and delete the key. This command is similar to GET, except for the fact that it also deletes the key on success (if and only if the key's value type is a string). + /// + /// + fn getdel(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::getdel(&inner, key).await?.convert() + }) + } + + /// Returns the length of the string value stored at key. An error is returned when key holds a non-string value. + /// + /// + fn strlen(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::strlen(&inner, key).await?.convert() + }) + } + + /// Removes the specified keys. 
A key is ignored if it does not exist. + /// + /// Returns the number of keys removed. + /// + /// + fn del(&self, keys: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + commands::keys::del(&inner, keys).await?.convert() + }) + } + + /// Returns the values of all specified keys. For every key that does not hold a string value or does not exist, the special value nil is returned. + /// + /// + fn mget(&self, keys: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + commands::keys::mget(&inner, keys).await?.convert() + }) + } + + /// Sets the given keys to their respective values. + /// + /// + fn mset(&self, values: V) -> AsyncResult<()> + where + V: TryInto, + V::Error: Into, + { + try_into!(values); + async_spawn(self, |inner| async move { + commands::keys::mset(&inner, values).await?.convert() + }) + } + + /// Sets the given keys to their respective values. MSETNX will not perform any operation at all even if just a single key already exists. + /// + /// + fn msetnx(&self, values: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + V: TryInto, + V::Error: Into, + { + try_into!(values); + async_spawn(self, |inner| async move { + commands::keys::msetnx(&inner, values).await?.convert() + }) + } + + /// Increments the number stored at `key` by one. If the key does not exist, it is set to 0 before performing the operation. + /// + /// Returns an error if the value at key is of the wrong type. + /// + /// + fn incr(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::incr(&inner, key).await?.convert() + }) + } + + /// Increments the number stored at `key` by `val`. If the key does not exist, it is set to 0 before performing the operation. 
+ /// + /// Returns an error if the value at key is of the wrong type. + /// + /// + fn incr_by(&self, key: K, val: i64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::incr_by(&inner, key, val).await?.convert() + }) + } + + /// Increment the string representing a floating point number stored at key by `val`. If the key does not exist, it is set to 0 before performing the operation. + /// + /// Returns an error if key value is the wrong type or if the current value cannot be parsed as a floating point value. + /// + /// + fn incr_by_float(&self, key: K, val: f64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::incr_by_float(&inner, key, val).await?.convert() + }) + } + + /// Decrements the number stored at `key` by one. If the key does not exist, it is set to 0 before performing the operation. + /// + /// Returns an error if the key contains a value of the wrong type. + /// + /// + fn decr(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::decr(&inner, key).await?.convert() + }) + } + + /// Decrements the number stored at `key` by `val`. If the key does not exist, it is set to 0 before performing the operation. + /// + /// Returns an error if the key contains a value of the wrong type. + /// + /// + fn decr_by(&self, key: K, val: i64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::decr_by(&inner, key, val).await?.convert() + }) + } + + /// Returns the remaining time to live of a key that has a timeout, in seconds. 
+ /// + /// + fn ttl(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::ttl(&inner, key).await?.convert() + }) + } + + /// Returns the remaining time to live of a key that has a timeout, in milliseconds. + /// + /// + fn pttl(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::pttl(&inner, key).await?.convert() + }) + } + + /// Remove the existing timeout on a key, turning the key from volatile (a key with an expiration) + /// to persistent (a key that will never expire as no timeout is associated). + /// + /// Returns a boolean value describing whether or not the timeout was removed. + /// + /// + fn persist(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::persist(&inner, key).await?.convert() + }) + } + + /// Set a timeout on key. After the timeout has expired, the key will be automatically deleted. + /// + /// Returns a boolean value describing whether or not the timeout was added. + /// + /// + fn expire(&self, key: K, seconds: i64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::expire(&inner, key, seconds).await?.convert() + }) + } + + /// Set a timeout on a key based on a UNIX timestamp. + /// + /// Returns a boolean value describing whether or not the timeout was added. + /// + /// + fn expire_at(&self, key: K, timestamp: i64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::keys::expire_at(&inner, key, timestamp).await?.convert() + }) + } + + /// Returns number of keys that exist from the `keys` arguments. 
+ /// + /// + fn exists(&self, keys: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + commands::keys::exists(&inner, keys).await?.convert() + }) + } +} diff --git a/src/commands/interfaces/lists.rs b/src/commands/interfaces/lists.rs new file mode 100644 index 00000000..ce464d86 --- /dev/null +++ b/src/commands/interfaces/lists.rs @@ -0,0 +1,358 @@ +use crate::commands; +use crate::error::RedisError; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{FromRedis, LMoveDirection, ListLocation, MultipleKeys, MultipleValues, RedisKey, RedisValue}; +use std::convert::TryInto; + +/// Functions that implement the [Lists](https://redis.io/commands#lists) interface. +pub trait ListInterface: ClientLike + Sized { + /// BLPOP is a blocking list pop primitive. It is the blocking version of LPOP because it blocks the connection when there are no elements to pop from + /// any of the given lists. An element is popped from the head of the first list that is non-empty, with the given keys being checked in the order that they are given. + /// + /// + fn blpop(&self, keys: K, timeout: f64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + commands::lists::blpop(&inner, keys, timeout).await?.convert() + }) + } + + /// BRPOP is a blocking list pop primitive. It is the blocking version of RPOP because it blocks the connection when there are no elements to pop from any of the + /// given lists. An element is popped from the tail of the first list that is non-empty, with the given keys being checked in the order that they are given. 
+ /// + /// + fn brpop(&self, keys: K, timeout: f64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + commands::lists::brpop(&inner, keys, timeout).await?.convert() + }) + } + + /// The blocking equivalent of [Self::rpoplpush]. + /// + /// + fn brpoplpush(&self, source: S, destination: D, timeout: f64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + D: Into, + { + into!(source, destination); + async_spawn(self, |inner| async move { + commands::lists::brpoplpush(&inner, source, destination, timeout) + .await? + .convert() + }) + } + + /// The blocking equivalent of [Self::lmove]. + /// + /// + fn blmove( + &self, + source: S, + destination: D, + source_direction: LMoveDirection, + destination_direction: LMoveDirection, + timeout: f64, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + D: Into, + { + into!(source, destination); + async_spawn(self, |inner| async move { + commands::lists::blmove( + &inner, + source, + destination, + source_direction, + destination_direction, + timeout, + ) + .await? + .convert() + }) + } + + /// Returns the element at index index in the list stored at key. + /// + /// + fn lindex(&self, key: K, index: i64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::lists::lindex(&inner, key, index).await?.convert() + }) + } + + /// Inserts element in the list stored at key either before or after the reference value `pivot`. + /// + /// + fn linsert(&self, key: K, location: ListLocation, pivot: P, element: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + P: TryInto, + P::Error: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(pivot, element); + async_spawn(self, |inner| async move { + commands::lists::linsert(&inner, key, location, pivot, element) + .await? 
+ .convert() + }) + } + + /// Returns the length of the list stored at key. + /// + /// + fn llen(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::lists::llen(&inner, key).await?.convert() + }) + } + + /// Removes and returns the first elements of the list stored at key. + /// + /// + fn lpop(&self, key: K, count: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::lists::lpop(&inner, key, count).await?.convert() + }) + } + + /// The command returns the index of matching elements inside a Redis list. + /// + /// + fn lpos( + &self, + key: K, + element: V, + rank: Option, + count: Option, + maxlen: Option, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(element); + async_spawn(self, |inner| async move { + commands::lists::lpos(&inner, key, element, rank, count, maxlen) + .await? + .convert() + }) + } + + /// Insert all the specified values at the head of the list stored at `key`. + /// + /// + fn lpush(&self, key: K, elements: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(elements); + async_spawn(self, |inner| async move { + commands::lists::lpush(&inner, key, elements).await?.convert() + }) + } + + /// Inserts specified values at the head of the list stored at `key`, only if `key` already exists and holds a list. + /// + /// + fn lpushx(&self, key: K, elements: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(elements); + async_spawn(self, |inner| async move { + commands::lists::lpushx(&inner, key, elements).await?.convert() + }) + } + + /// Returns the specified elements of the list stored at `key`. 
+ /// + /// + fn lrange(&self, key: K, start: i64, stop: i64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::lists::lrange(&inner, key, start, stop).await?.convert() + }) + } + + /// Removes the first `count` occurrences of elements equal to `element` from the list stored at `key`. + /// + /// + fn lrem(&self, key: K, count: i64, element: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(element); + async_spawn(self, |inner| async move { + commands::lists::lrem(&inner, key, count, element).await?.convert() + }) + } + + /// Sets the list element at `index` to `element`. + /// + /// + fn lset(&self, key: K, index: i64, element: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(element); + async_spawn(self, |inner| async move { + commands::lists::lset(&inner, key, index, element).await?.convert() + }) + } + + /// Trim an existing list so that it will contain only the specified range of elements specified. + /// + /// + fn ltrim(&self, key: K, start: i64, stop: i64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::lists::ltrim(&inner, key, start, stop).await?.convert() + }) + } + + /// Removes and returns the last elements of the list stored at `key`. + /// + /// + fn rpop(&self, key: K, count: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::lists::rpop(&inner, key, count).await?.convert() + }) + } + + /// Atomically returns and removes the last element (tail) of the list stored at `source`, and pushes the element at the first element (head) of the list stored at `destination`. 
+ /// + /// + fn rpoplpush(&self, source: S, dest: D) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + D: Into, + { + into!(source, dest); + async_spawn(self, |inner| async move { + commands::lists::rpoplpush(&inner, source, dest).await?.convert() + }) + } + + /// Atomically returns and removes the first/last element (head/tail depending on the source direction argument) of the list stored at `source`, and pushes + /// the element at the first/last element (head/tail depending on the destination direction argument) of the list stored at `destination`. + /// + /// + fn lmove( + &self, + source: S, + dest: D, + source_direction: LMoveDirection, + dest_direction: LMoveDirection, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + D: Into, + { + into!(source, dest); + async_spawn(self, |inner| async move { + commands::lists::lmove(&inner, source, dest, source_direction, dest_direction) + .await? + .convert() + }) + } + + /// Insert all the specified values at the tail of the list stored at `key`. + /// + /// + fn rpush(&self, key: K, elements: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(elements); + async_spawn(self, |inner| async move { + commands::lists::rpush(&inner, key, elements).await?.convert() + }) + } + + /// Inserts specified values at the tail of the list stored at `key`, only if key already exists and holds a list. 
+ /// + /// + fn rpushx(&self, key: K, elements: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(elements); + async_spawn(self, |inner| async move { + commands::lists::rpushx(&inner, key, elements).await?.convert() + }) + } +} diff --git a/src/commands/interfaces/lua.rs b/src/commands/interfaces/lua.rs new file mode 100644 index 00000000..30b91522 --- /dev/null +++ b/src/commands/interfaces/lua.rs @@ -0,0 +1,135 @@ +use crate::commands; +use crate::error::RedisError; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{FromRedis, MultipleKeys, MultipleStrings, MultipleValues, ScriptDebugFlag}; +use crate::utils; +use bytes_utils::Str; +use std::convert::TryInto; + +/// Functions that implement the [lua](https://redis.io/commands#lua) interface. +pub trait LuaInterface: ClientLike + Sized { + /// Load a script into the scripts cache, without executing it. After the specified command is loaded into the script cache it will be callable using EVALSHA with the correct SHA1 digest of the script. + /// + /// + fn script_load(&self, script: S) -> AsyncResult + where + S: Into, + { + into!(script); + async_spawn(self, |inner| async move { + commands::lua::script_load(&inner, script).await?.convert() + }) + } + + /// A clustered variant of [script_load](Self::script_load) that loads the script on all primary nodes in a cluster. + fn script_load_cluster(&self, script: S) -> AsyncResult + where + S: Into, + { + into!(script); + async_spawn(self, |inner| async move { + commands::lua::script_load_cluster(&inner, script).await?.convert() + }) + } + + /// Kills the currently executing Lua script, assuming no write operation was yet performed by the script. 
+ /// + /// + fn script_kill(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::lua::script_kill(&inner).await + }) + } + + /// A clustered variant of the [script_kill](Self::script_kill) command that issues the command to all primary nodes in the cluster. + fn script_kill_cluster(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::lua::script_kill_cluster(&inner).await + }) + } + + /// Flush the Lua scripts cache. + /// + /// + fn script_flush(&self, r#async: bool) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::lua::script_flush(&inner, r#async).await + }) + } + + /// A clustered variant of [script_flush](Self::script_flush) that flushes the script cache on all primary nodes in the cluster. + fn script_flush_cluster(&self, r#async: bool) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::lua::script_flush_cluster(&inner, r#async).await + }) + } + + /// Returns information about the existence of the scripts in the script cache. + /// + /// + fn script_exists(&self, hashes: H) -> AsyncResult> + where + H: Into, + { + into!(hashes); + async_spawn(self, |inner| async move { + commands::lua::script_exists(&inner, hashes).await + }) + } + + /// Set the debug mode for subsequent scripts executed with EVAL. + /// + /// + fn script_debug(&self, flag: ScriptDebugFlag) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::lua::script_debug(&inner, flag).await + }) + } + + /// Evaluates a script cached on the server side by its SHA1 digest. 
+ /// + /// + /// + /// **Note: Use `None` to represent an empty set of keys or args.** + fn evalsha(&self, hash: S, keys: K, args: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(hash, keys); + try_into!(args); + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::lua::evalsha(&inner, hash, keys, args).await?.convert() + }) + } + + /// Evaluate a Lua script on the server. + /// + /// + /// + /// **Note: Use `None` to represent an empty set of keys or args.** + fn eval(&self, script: S, keys: K, args: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(script, keys); + try_into!(args); + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::lua::eval(&inner, script, keys, args).await?.convert() + }) + } +} diff --git a/src/commands/interfaces/memory.rs b/src/commands/interfaces/memory.rs new file mode 100644 index 00000000..27be16db --- /dev/null +++ b/src/commands/interfaces/memory.rs @@ -0,0 +1,61 @@ +use crate::commands; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{MemoryStats, RedisKey}; +use crate::utils; + +/// Functions that implement the [Memory](https://redis.io/commands#server) interface. +pub trait MemoryInterface: ClientLike + Sized { + /// The MEMORY DOCTOR command reports about different memory-related issues that the Redis server experiences, and advises about possible remedies. + /// + /// + fn memory_doctor(&self) -> AsyncResult { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::memory::memory_doctor(&inner).await + }) + } + + /// The MEMORY MALLOC-STATS command provides an internal statistics report from the memory allocator. 
+ /// + /// + fn memory_malloc_stats(&self) -> AsyncResult { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::memory::memory_malloc_stats(&inner).await + }) + } + + /// The MEMORY PURGE command attempts to purge dirty pages so these can be reclaimed by the allocator. + /// + /// + fn memory_purge(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::memory::memory_purge(&inner).await + }) + } + + /// The MEMORY STATS command returns an Array reply about the memory usage of the server. + /// + /// + fn memory_stats(&self) -> AsyncResult { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::memory::memory_stats(&inner).await + }) + } + + /// The MEMORY USAGE command reports the number of bytes that a key and its value require to be stored in RAM. + /// + /// + fn memory_usage(&self, key: K, samples: Option) -> AsyncResult> + where + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::memory::memory_usage(&inner, key, samples).await + }) + } +} diff --git a/src/commands/interfaces/metrics.rs b/src/commands/interfaces/metrics.rs new file mode 100644 index 00000000..ef8f17d6 --- /dev/null +++ b/src/commands/interfaces/metrics.rs @@ -0,0 +1,87 @@ +use crate::interfaces::ClientLike; +use crate::utils; + +#[cfg(feature = "metrics")] +use crate::modules::metrics::Stats; + +/// Functions that implement the internal metrics interface, largely controlled by the `metrics` feature flag. +pub trait MetricsInterface: ClientLike + Sized { + /// Read the number of request redeliveries. + /// + /// This is the number of times a request had to be sent again due to a connection closing while waiting on a response. 
+ fn read_redelivery_count(&self) -> usize { + utils::read_atomic(&self.inner().redeliver_count) + } + + /// Read and reset the number of request redeliveries. + fn take_redelivery_count(&self) -> usize { + utils::set_atomic(&self.inner().redeliver_count, 0) + } + + /// Read the number of buffered commands that have not yet been sent to the server. + fn command_queue_len(&self) -> usize { + utils::read_atomic(&self.inner().cmd_buffer_len) + } + + /// Read latency metrics across all commands. + /// + /// This metric reflects the total latency experienced by callers, including time spent waiting in memory to be written and network latency. + /// Features such as automatic reconnect, `reconnect-on-auth-error`, and frame serialization time can all affect these values. + #[cfg(feature = "metrics")] + #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] + fn read_latency_metrics(&self) -> Stats { + self.inner().latency_stats.read().read_metrics() + } + + /// Read and consume latency metrics, resetting their values afterwards. + #[cfg(feature = "metrics")] + #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] + fn take_latency_metrics(&self) -> Stats { + self.inner().latency_stats.write().take_metrics() + } + + /// Read network latency metrics across all commands. + /// + /// This metric only reflects time spent waiting on a response. It will factor in reconnect time if a response doesn't arrive due to a connection + /// closing, but it does not factor in the time a command spends waiting to be written, serialization time, backpressure, etc. + #[cfg(feature = "metrics")] + #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] + fn read_network_latency_metrics(&self) -> Stats { + self.inner().network_latency_stats.read().read_metrics() + } + + /// Read and consume network latency metrics, resetting their values afterwards. 
+ #[cfg(feature = "metrics")] + #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] + fn take_network_latency_metrics(&self) -> Stats { + self.inner().network_latency_stats.write().take_metrics() + } + + /// Read request payload size metrics across all commands. + #[cfg(feature = "metrics")] + #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] + fn read_req_size_metrics(&self) -> Stats { + self.inner().req_size_stats.read().read_metrics() + } + + /// Read and consume request payload size metrics, resetting their values afterwards. + #[cfg(feature = "metrics")] + #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] + fn take_req_size_metrics(&self) -> Stats { + self.inner().req_size_stats.write().take_metrics() + } + + /// Read response payload size metrics across all commands. + #[cfg(feature = "metrics")] + #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] + fn read_res_size_metrics(&self) -> Stats { + self.inner().res_size_stats.read().read_metrics() + } + + /// Read and consume response payload size metrics, resetting their values afterwards. 
+ #[cfg(feature = "metrics")] + #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] + fn take_res_size_metrics(&self) -> Stats { + self.inner().res_size_stats.write().take_metrics() + } +} diff --git a/src/commands/interfaces/mod.rs b/src/commands/interfaces/mod.rs new file mode 100644 index 00000000..dbba2075 --- /dev/null +++ b/src/commands/interfaces/mod.rs @@ -0,0 +1,24 @@ +pub mod acl; +pub mod client; +pub mod cluster; +pub mod config; +pub mod geo; +pub mod hashes; +pub mod hyperloglog; +pub mod keys; +pub mod lists; +pub mod lua; +pub mod memory; +pub mod metrics; +pub mod pubsub; +pub mod scan; +pub mod server; +pub mod sets; +pub mod slowlog; +pub mod sorted_sets; +pub mod streams; +pub mod strings; +pub mod transactions; + +#[cfg(feature = "sentinel-client")] +pub mod sentinel; diff --git a/src/commands/interfaces/pubsub.rs b/src/commands/interfaces/pubsub.rs new file mode 100644 index 00000000..be9bc5b0 --- /dev/null +++ b/src/commands/interfaces/pubsub.rs @@ -0,0 +1,107 @@ +use crate::commands; +use crate::error::RedisError; +use crate::interfaces::{async_spawn, AsyncResult, AsyncStream, ClientLike}; +use crate::types::{FromRedis, KeyspaceEvent, MultipleStrings, RedisValue}; +use bytes_utils::Str; +use std::convert::TryInto; +use tokio::sync::mpsc::unbounded_channel; +use tokio_stream::wrappers::UnboundedReceiverStream; + +/// Functions that implement the [publish-subscribe](https://redis.io/commands#pubsub) interface. +pub trait PubsubInterface: ClientLike + Sized { + /// Listen for `(channel, message)` tuples on the publish-subscribe interface. **Keyspace events are not sent on this interface.** + /// + /// If the connection to the Redis server closes for any reason this function does not need to be called again. Messages will start appearing on the original stream after [subscribe](Self::subscribe) is called again. 
+ fn on_message(&self) -> AsyncStream<(String, RedisValue)> { + let (tx, rx) = unbounded_channel(); + self.inner().message_tx.write().push_back(tx); + + UnboundedReceiverStream::new(rx).into() + } + + /// Listen for keyspace and keyevent notifications on the publish subscribe interface. + /// + /// Callers still need to configure the server and subscribe to the relevant channels, but this interface will format the messages automatically. + /// + /// If the connection to the Redis server closes for any reason this function does not need to be called again. + /// + /// + fn on_keyspace_event(&self) -> AsyncStream { + let (tx, rx) = unbounded_channel(); + self.inner().keyspace_tx.write().push_back(tx); + + UnboundedReceiverStream::new(rx).into() + } + + /// Subscribe to a channel on the PubSub interface, returning the number of channels to which the client is subscribed. + /// + /// Any messages received before `on_message` is called will be discarded, so it's usually best to call `on_message` + /// before calling `subscribe` for the first time. + /// + /// + fn subscribe(&self, channel: S) -> AsyncResult + where + S: Into, + { + into!(channel); + async_spawn(self, |inner| async move { + commands::pubsub::subscribe(&inner, channel).await + }) + } + + /// Unsubscribe from a channel on the PubSub interface, returning the number of channels to which hte client is subscribed. + /// + /// + fn unsubscribe(&self, channel: S) -> AsyncResult + where + S: Into, + { + into!(channel); + async_spawn(self, |inner| async move { + commands::pubsub::unsubscribe(&inner, channel).await + }) + } + + /// Subscribes the client to the given patterns. + /// + /// + fn psubscribe(&self, patterns: S) -> AsyncResult> + where + S: Into, + { + into!(patterns); + async_spawn(self, |inner| async move { + commands::pubsub::psubscribe(&inner, patterns).await + }) + } + + /// Unsubscribes the client from the given patterns, or from all of them if none is given. 
+ /// + /// + fn punsubscribe(&self, patterns: S) -> AsyncResult> + where + S: Into, + { + into!(patterns); + async_spawn(self, |inner| async move { + commands::pubsub::punsubscribe(&inner, patterns).await + }) + } + + /// Publish a message on the PubSub interface, returning the number of clients that received the message. + /// + /// + fn publish(&self, channel: S, message: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + V: TryInto, + V::Error: Into, + { + into!(channel); + try_into!(message); + async_spawn(self, |inner| async move { + commands::pubsub::publish(&inner, channel, message).await?.convert() + }) + } +} diff --git a/src/commands/interfaces/scan.rs b/src/commands/interfaces/scan.rs new file mode 100644 index 00000000..f4011a66 --- /dev/null +++ b/src/commands/interfaces/scan.rs @@ -0,0 +1 @@ +// `impl Trait` doesn't work inside traits, so we just put these functions directly on the RedisClient diff --git a/src/commands/interfaces/sentinel.rs b/src/commands/interfaces/sentinel.rs new file mode 100644 index 00000000..4dc6964a --- /dev/null +++ b/src/commands/interfaces/sentinel.rs @@ -0,0 +1,230 @@ +use crate::commands; +use crate::error::RedisError; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{FromRedis, RedisMap, RedisValue, SentinelFailureKind}; +use bytes_utils::Str; +use std::convert::TryInto; +use std::net::IpAddr; + +/// Functions that implement the [Sentinel](https://redis.io/topics/sentinel#sentinel-commands) interface. +pub trait SentinelInterface: ClientLike + Sized { + /// Check if the current Sentinel configuration is able to reach the quorum needed to failover a master, and the majority needed to authorize the failover. 
+ fn ckquorum(&self, name: N) -> AsyncResult + where + R: FromRedis + Unpin + Send, + N: Into, + { + into!(name); + async_spawn(self, |inner| async move { + commands::sentinel::ckquorum(&inner, name).await?.convert() + }) + } + + /// Force Sentinel to rewrite its configuration on disk, including the current Sentinel state. + fn flushconfig(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::sentinel::flushconfig(&inner).await?.convert() + }) + } + + /// Force a failover as if the master was not reachable, and without asking for agreement to other Sentinels. + fn failover(&self, name: N) -> AsyncResult + where + R: FromRedis + Unpin + Send, + N: Into, + { + into!(name); + async_spawn(self, |inner| async move { + commands::sentinel::failover(&inner, name).await?.convert() + }) + } + + /// Return the ip and port number of the master with that name. + fn get_master_addr_by_name(&self, name: N) -> AsyncResult + where + R: FromRedis + Unpin + Send, + N: Into, + { + into!(name); + async_spawn(self, |inner| async move { + commands::sentinel::get_master_addr_by_name(&inner, name) + .await? + .convert() + }) + } + + /// Return cached INFO output from masters and replicas. + fn info_cache(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::sentinel::info_cache(&inner).await?.convert() + }) + } + + /// Show the state and info of the specified master. + fn master(&self, name: N) -> AsyncResult + where + R: FromRedis + Unpin + Send, + N: Into, + { + into!(name); + async_spawn(self, |inner| async move { + commands::sentinel::master(&inner, name).await?.convert() + }) + } + + /// Show a list of monitored masters and their state. + fn masters(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::sentinel::masters(&inner).await?.convert() + }) + } + + /// Start Sentinel's monitoring. 
+ /// + /// + fn monitor(&self, name: N, ip: IpAddr, port: u16, quorum: u32) -> AsyncResult + where + R: FromRedis + Unpin + Send, + N: Into, + { + into!(name); + async_spawn(self, |inner| async move { + commands::sentinel::monitor(&inner, name, ip, port, quorum) + .await? + .convert() + }) + } + + /// Return the ID of the Sentinel instance. + fn myid(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::sentinel::myid(&inner).await?.convert() + }) + } + + /// This command returns information about pending scripts. + fn pending_scripts(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::sentinel::pending_scripts(&inner).await?.convert() + }) + } + + /// Stop Sentinel's monitoring. + /// + /// + fn remove(&self, name: N) -> AsyncResult + where + R: FromRedis + Unpin + Send, + N: Into, + { + into!(name); + async_spawn(self, |inner| async move { + commands::sentinel::remove(&inner, name).await?.convert() + }) + } + + /// Show a list of replicas for this master, and their state. + fn replicas(&self, name: N) -> AsyncResult + where + R: FromRedis + Unpin + Send, + N: Into, + { + into!(name); + async_spawn(self, |inner| async move { + commands::sentinel::replicas(&inner, name).await?.convert() + }) + } + + /// Show a list of sentinel instances for this master, and their state. + fn sentinels(&self, name: N) -> AsyncResult + where + R: FromRedis + Unpin + Send, + N: Into, + { + into!(name); + async_spawn(self, |inner| async move { + commands::sentinel::sentinels(&inner, name).await?.convert() + }) + } + + /// Set Sentinel's monitoring configuration. 
+ /// + /// + fn set(&self, name: N, args: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + N: Into, + V: TryInto, + V::Error: Into, + { + into!(name); + try_into!(args); + async_spawn(self, |inner| async move { + commands::sentinel::set(&inner, name, args.into()).await?.convert() + }) + } + + /// This command simulates different Sentinel crash scenarios. + fn simulate_failure(&self, kind: SentinelFailureKind) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::sentinel::simulate_failure(&inner, kind).await?.convert() + }) + } + + /// This command will reset all the masters with matching name. + fn reset(&self, pattern: P) -> AsyncResult + where + R: FromRedis + Unpin + Send, + P: Into, + { + into!(pattern); + async_spawn(self, |inner| async move { + commands::sentinel::reset(&inner, pattern).await?.convert() + }) + } + + /// Get the current value of a global Sentinel configuration parameter. The specified name may be a wildcard, similar to the Redis CONFIG GET command. + fn config_get(&self, name: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(name); + async_spawn(self, |inner| async move { + commands::sentinel::config_get(&inner, name).await?.convert() + }) + } + + /// Set the value of a global Sentinel configuration parameter. 
+ fn config_set(&self, name: K, value: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(name); + try_into!(value); + async_spawn(self, |inner| async move { + commands::sentinel::config_set(&inner, name, value).await?.convert() + }) + } +} diff --git a/src/commands/interfaces/server.rs b/src/commands/interfaces/server.rs new file mode 100644 index 00000000..4679a9b2 --- /dev/null +++ b/src/commands/interfaces/server.rs @@ -0,0 +1,167 @@ +use crate::commands; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::FromRedis; +use crate::types::RespVersion; +use crate::utils; +use bytes_utils::Str; +use std::time::Duration; +use tokio::time::interval as tokio_interval; + +/// Functions for authenticating clients. +pub trait AuthInterface: ClientLike + Sized { + /// Request for authentication in a password-protected Redis server. Returns ok if successful. + /// + /// The client will automatically authenticate with the default user if a password is provided in the associated `RedisConfig` when calling [connect](crate::interfaces::ClientLike::connect). + /// + /// If running against clustered servers this function will authenticate all connections. + /// + /// + fn auth(&self, username: Option, password: S) -> AsyncResult<()> + where + S: Into, + { + into!(password); + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::server::auth(&inner, username, password).await + }) + } + + /// Switch to a different protocol, optionally authenticating in the process. + /// + /// If running against clustered servers this function will issue the HELLO command to each server concurrently. 
+ /// + /// + fn hello(&self, version: RespVersion, auth: Option<(String, String)>) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::server::hello(&inner, version, auth).await + }) + } +} + +/// Functions that provide a connection heartbeat interface. +pub trait HeartbeatInterface: ClientLike + Sized + Clone + 'static { + /// Return a future that will ping the server on an interval. + /// + /// When running against a cluster this will ping a random node on each interval. + #[allow(unreachable_code)] + fn enable_heartbeat(&self, interval: Duration, break_on_error: bool) -> AsyncResult<()> { + let _self = self.clone(); + + async_spawn(self, |inner| async move { + let mut interval = tokio_interval(interval); + + loop { + interval.tick().await; + + if utils::is_locked_some(&inner.multi_block) { + _debug!(inner, "Skip heartbeat while inside transaction."); + continue; + } + + if break_on_error { + let _ = _self.ping().await?; + } else { + if let Err(e) = _self.ping().await { + _warn!(inner, "Heartbeat ping failed with error: {:?}", e); + } + } + } + + Ok(()) + }) + } +} + +/// Functions that implement the [Server](https://redis.io/commands#server) interface. +pub trait ServerInterface: ClientLike + Sized { + /// Instruct Redis to start an Append Only File rewrite process. + /// + /// + fn bgrewriteaof(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::server::bgrewriteaof(&inner).await?.convert() + }) + } + + /// Save the DB in background. + /// + /// + fn bgsave(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::server::bgsave(&inner).await?.convert() + }) + } + + /// Return the number of keys in the selected database. 
+ /// + /// + fn dbsize(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::server::dbsize(&inner).await?.convert() + }) + } + + /// Delete the keys in all databases. + /// + /// + fn flushall(&self, r#async: bool) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::server::flushall(&inner, r#async).await?.convert() + }) + } + + /// Delete the keys on all nodes in the cluster. This is a special function that does not map directly to the Redis interface. + fn flushall_cluster(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::server::flushall_cluster(&inner).await + }) + } + + /// Select the database this client should use. + /// + /// + fn select(&self, db: u8) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + commands::server::select(&inner, db).await?.convert() + }) + } + + /// This command will start a coordinated failover between the currently-connected-to master and one of its replicas. + /// + /// + fn failover(&self, to: Option<(String, u16)>, force: bool, abort: bool, timeout: Option) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::server::failover(&inner, to, force, abort, timeout).await + }) + } + + /// Return the UNIX TIME of the last DB save executed with success. 
+ /// + /// + fn lastsave(&self) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::server::lastsave(&inner).await?.convert() + }) + } +} diff --git a/src/commands/interfaces/sets.rs b/src/commands/interfaces/sets.rs new file mode 100644 index 00000000..8493f6e5 --- /dev/null +++ b/src/commands/interfaces/sets.rs @@ -0,0 +1,239 @@ +use crate::error::RedisError; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{MultipleKeys, MultipleValues, RedisKey, FromRedis, RedisValue}; +use crate::{commands}; +use std::convert::TryInto; + +/// Functions that implement the [Sets](https://redis.io/commands#set) interface. +pub trait SetsInterface: ClientLike + Sized { + /// Add the specified members to the set stored at `key`. + /// + /// + fn sadd(&self, key: K, members: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(members); + async_spawn(self, |inner| async move { + commands::sets::sadd(&inner, key, members).await?.convert() + }) + } + + /// Returns the set cardinality (number of elements) of the set stored at `key`. + /// + /// + fn scard(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::sets::scard(&inner, key).await?.convert() + }) + } + + /// Returns the members of the set resulting from the difference between the first set and all the successive sets. + /// + /// + fn sdiff(&self, keys: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + commands::sets::sdiff(&inner, keys).await?.convert() + }) + } + + /// This command is equal to SDIFF, but instead of returning the resulting set, it is stored in `destination`. 
+ /// + /// + fn sdiffstore(&self, dest: D, keys: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + D: Into, + K: Into, + { + into!(dest, keys); + async_spawn(self, |inner| async move { + commands::sets::sdiffstore(&inner, dest, keys).await?.convert() + }) + } + + /// Returns the members of the set resulting from the intersection of all the given sets. + /// + /// + fn sinter(&self, keys: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + commands::sets::sinter(&inner, keys).await?.convert() + }) + } + + /// This command is equal to SINTER, but instead of returning the resulting set, it is stored in `destination`. + /// + /// + fn sinterstore(&self, dest: D, keys: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + D: Into, + K: Into, + { + into!(dest, keys); + async_spawn(self, |inner| async move { + commands::sets::sinterstore(&inner, dest, keys).await?.convert() + }) + } + + /// Returns if `member` is a member of the set stored at `key`. + /// + /// + fn sismember(&self, key: K, member: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(member); + async_spawn(self, |inner| async move { + commands::sets::sismember(&inner, key, member).await?.convert() + }) + } + + /// Returns whether each member is a member of the set stored at `key`. + /// + /// + fn smismember(&self, key: K, members: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(members); + async_spawn(self, |inner| async move { + commands::sets::smismember(&inner, key, members).await?.convert() + }) + } + + /// Returns all the members of the set value stored at `key`. 
+ /// + /// + fn smembers(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::sets::smembers(&inner, key).await?.convert() + }) + } + + /// Move `member` from the set at `source` to the set at `destination`. + /// + /// + fn smove(&self, source: S, dest: D, member: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + S: Into, + D: Into, + V: TryInto, + V::Error: Into, + { + into!(source, dest); + try_into!(member); + async_spawn(self, |inner| async move { + commands::sets::smove(&inner, source, dest, member).await?.convert() + }) + } + + /// Removes and returns one or more random members from the set value store at `key`. + /// + /// + fn spop(&self, key: K, count: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::sets::spop(&inner, key, count).await?.convert() + }) + } + + /// When called with just the key argument, return a random element from the set value stored at `key`. + /// + /// If the provided `count` argument is positive, return an array of distinct elements. The array's length is either count or the set's cardinality (SCARD), whichever is lower. + /// + /// + fn srandmember(&self, key: K, count: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::sets::srandmember(&inner, key, count).await?.convert() + }) + } + + /// Remove the specified members from the set stored at `key`. + /// + /// + fn srem(&self, key: K, members: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(members); + async_spawn(self, |inner| async move { + commands::sets::srem(&inner, key, members).await?.convert() + }) + } + + /// Returns the members of the set resulting from the union of all the given sets. 
+ /// + /// + fn sunion(&self, keys: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + commands::sets::sunion(&inner, keys).await?.convert() + }) + } + + /// This command is equal to SUNION, but instead of returning the resulting set, it is stored in `destination`. + /// + /// + fn sunionstore(&self, dest: D, keys: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + D: Into, + K: Into, + { + into!(dest, keys); + async_spawn(self, |inner| async move { + commands::sets::sunionstore(&inner, dest, keys).await?.convert() + }) + } +} diff --git a/src/commands/interfaces/slowlog.rs b/src/commands/interfaces/slowlog.rs new file mode 100644 index 00000000..02965396 --- /dev/null +++ b/src/commands/interfaces/slowlog.rs @@ -0,0 +1,36 @@ +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::SlowlogEntry; +use crate::{commands, utils}; + +/// Functions that implement the [slowlog](https://redis.io/commands#server) interface. +pub trait SlowlogInterface: ClientLike + Sized { + /// This command is used to read the slow queries log. + /// + /// + fn slowlog_get(&self, count: Option) -> AsyncResult> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::slowlog::slowlog_get(&inner, count).await + }) + } + + /// This command is used to read length of the slow queries log. + /// + /// + fn slowlog_length(&self) -> AsyncResult { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::slowlog::slowlog_length(&inner).await + }) + } + + /// This command is used to reset the slow queries log. 
+ /// + /// + fn slowlog_reset(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::slowlog::slowlog_reset(&inner).await + }) + } +} diff --git a/src/commands/interfaces/sorted_sets.rs b/src/commands/interfaces/sorted_sets.rs new file mode 100644 index 00000000..157d794c --- /dev/null +++ b/src/commands/interfaces/sorted_sets.rs @@ -0,0 +1,617 @@ +use crate::commands; +use crate::error::RedisError; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::types::{ + AggregateOptions, FromRedis, Limit, MultipleKeys, MultipleValues, MultipleWeights, MultipleZaddValues, Ordering, + RedisKey, RedisValue, SetOptions, ZRange, ZSort, +}; +use std::convert::TryInto; + +/// Functions that implement the [Sorted Sets](https://redis.io/commands#sorted_set) interface. +pub trait SortedSetsInterface: ClientLike + Sized { + /// The blocking variant of the ZPOPMIN command. + /// + /// + fn bzpopmin(&self, keys: K, timeout: f64) -> AsyncResult> + where + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + commands::sorted_sets::bzpopmin(&inner, keys, timeout).await + }) + } + + /// The blocking variant of the ZPOPMAX command. + /// + /// + fn bzpopmax(&self, keys: K, timeout: f64) -> AsyncResult> + where + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + commands::sorted_sets::bzpopmax(&inner, keys, timeout).await + }) + } + + /// Adds all the specified members with the specified scores to the sorted set stored at `key`. + /// + /// + fn zadd( + &self, + key: K, + options: Option, + ordering: Option, + changed: bool, + incr: bool, + values: V, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(values); + async_spawn(self, |inner| async move { + commands::sorted_sets::zadd(&inner, key, options, ordering, changed, incr, values) + .await? 
+ .convert() + }) + } + + /// Returns the sorted set cardinality (number of elements) of the sorted set stored at `key`. + /// + /// + fn zcard(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::sorted_sets::zcard(&inner, key).await?.convert() + }) + } + + /// Returns the number of elements in the sorted set at `key` with a score between `min` and `max`. + /// + /// + fn zcount(&self, key: K, min: f64, max: f64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::sorted_sets::zcount(&inner, key, min, max).await?.convert() + }) + } + + /// This command is similar to ZDIFFSTORE, but instead of storing the resulting sorted set, it is returned to the client. + /// + /// + fn zdiff(&self, keys: K, withscores: bool) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + commands::sorted_sets::zdiff(&inner, keys, withscores).await?.convert() + }) + } + + /// Computes the difference between the first and all successive input sorted sets and stores the result in `destination`. + /// + /// + fn zdiffstore(&self, dest: D, keys: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + D: Into, + K: Into, + { + into!(dest, keys); + async_spawn(self, |inner| async move { + commands::sorted_sets::zdiffstore(&inner, dest, keys).await?.convert() + }) + } + + /// Increments the score of `member` in the sorted set stored at `key` by `increment`. + /// + /// + fn zincrby(&self, key: K, increment: f64, member: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(member); + async_spawn(self, |inner| async move { + commands::sorted_sets::zincrby(&inner, key, increment, member) + .await? 
+ .convert() + }) + } + + /// This command is similar to ZINTERSTORE, but instead of storing the resulting sorted set, it is returned to the client. + /// + /// + fn zinter( + &self, + keys: K, + weights: W, + aggregate: Option, + withscores: bool, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + W: Into, + { + into!(keys, weights); + async_spawn(self, |inner| async move { + commands::sorted_sets::zinter(&inner, keys, weights, aggregate, withscores) + .await? + .convert() + }) + } + + /// Computes the intersection of the sorted sets given by the specified keys, and stores the result in `destination`. + /// + /// + fn zinterstore( + &self, + dest: D, + keys: K, + weights: W, + aggregate: Option, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + D: Into, + K: Into, + W: Into, + { + into!(dest, keys, weights); + async_spawn(self, |inner| async move { + commands::sorted_sets::zinterstore(&inner, dest, keys, weights, aggregate) + .await? + .convert() + }) + } + + /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, + /// this command returns the number of elements in the sorted set at key with a value between min and max. + /// + /// + fn zlexcount(&self, key: K, min: M, max: N) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + M: TryInto, + M::Error: Into, + N: TryInto, + N::Error: Into, + { + into!(key); + try_into!(min, max); + async_spawn(self, |inner| async move { + commands::sorted_sets::zlexcount(&inner, key, min, max).await?.convert() + }) + } + + /// Removes and returns up to count members with the highest scores in the sorted set stored at `key`. 
+ /// + /// + fn zpopmax(&self, key: K, count: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::sorted_sets::zpopmax(&inner, key, count).await?.convert() + }) + } + + /// Removes and returns up to count members with the lowest scores in the sorted set stored at `key`. + /// + /// + fn zpopmin(&self, key: K, count: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::sorted_sets::zpopmin(&inner, key, count).await?.convert() + }) + } + + /// When called with just the key argument, return a random element from the sorted set value stored at `key`. + /// + /// + fn zrandmember(&self, key: K, count: Option<(i64, bool)>) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::sorted_sets::zrandmember(&inner, key, count).await?.convert() + }) + } + + /// This command is like ZRANGE, but stores the result in the `destination` key. + /// + /// + fn zrangestore( + &self, + dest: D, + source: S, + min: M, + max: N, + sort: Option, + rev: bool, + limit: Option, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + D: Into, + S: Into, + M: TryInto, + M::Error: Into, + N: TryInto, + N::Error: Into, + { + into!(dest, source); + try_into!(min, max); + async_spawn(self, |inner| async move { + commands::sorted_sets::zrangestore(&inner, dest, source, min, max, sort, rev, limit) + .await? + .convert() + }) + } + + /// Returns the specified range of elements in the sorted set stored at `key`. 
+ /// + /// + fn zrange( + &self, + key: K, + min: M, + max: N, + sort: Option, + rev: bool, + limit: Option, + withscores: bool, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + M: TryInto, + M::Error: Into, + N: TryInto, + N::Error: Into, + { + into!(key); + try_into!(min, max); + async_spawn(self, |inner| async move { + commands::sorted_sets::zrange(&inner, key, min, max, sort, rev, limit, withscores) + .await? + .convert() + }) + } + + /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical + /// ordering, this command returns all the elements in the sorted set at `key` with a value between `min` and `max`. + /// + /// + fn zrangebylex(&self, key: K, min: M, max: N, limit: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + M: TryInto, + M::Error: Into, + N: TryInto, + N::Error: Into, + { + into!(key); + try_into!(min, max); + async_spawn(self, |inner| async move { + commands::sorted_sets::zrangebylex(&inner, key, min, max, limit) + .await? + .convert() + }) + } + + /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical + /// ordering, this command returns all the elements in the sorted set at `key` with a value between `max` and `min`. + /// + /// + fn zrevrangebylex(&self, key: K, max: M, min: N, limit: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + M: TryInto, + M::Error: Into, + N: TryInto, + N::Error: Into, + { + into!(key); + try_into!(max, min); + async_spawn(self, |inner| async move { + commands::sorted_sets::zrevrangebylex(&inner, key, max, min, limit) + .await? + .convert() + }) + } + + /// Returns all the elements in the sorted set at key with a score between `min` and `max` (including elements + /// with score equal to `min` or `max`). 
+ /// + /// + fn zrangebyscore( + &self, + key: K, + min: M, + max: N, + withscores: bool, + limit: Option, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + M: TryInto, + M::Error: Into, + N: TryInto, + N::Error: Into, + { + into!(key); + try_into!(min, max); + async_spawn(self, |inner| async move { + commands::sorted_sets::zrangebyscore(&inner, key, min, max, withscores, limit) + .await? + .convert() + }) + } + + /// Returns all the elements in the sorted set at `key` with a score between `max` and `min` (including + /// elements with score equal to `max` or `min`). + /// + /// + fn zrevrangebyscore( + &self, + key: K, + max: M, + min: N, + withscores: bool, + limit: Option, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + M: TryInto, + M::Error: Into, + N: TryInto, + N::Error: Into, + { + into!(key); + try_into!(max, min); + async_spawn(self, |inner| async move { + commands::sorted_sets::zrevrangebyscore(&inner, key, max, min, withscores, limit) + .await? + .convert() + }) + } + + /// Returns the rank of member in the sorted set stored at `key`, with the scores ordered from low to high. + /// + /// + fn zrank(&self, key: K, member: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(member); + async_spawn(self, |inner| async move { + commands::sorted_sets::zrank(&inner, key, member).await?.convert() + }) + } + + /// Removes the specified members from the sorted set stored at `key`. Non existing members are ignored. 
+ /// + /// + fn zrem(&self, key: K, members: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(members); + async_spawn(self, |inner| async move { + commands::sorted_sets::zrem(&inner, key, members).await?.convert() + }) + } + + /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical + /// ordering, this command removes all elements in the sorted set stored at `key` between the lexicographical range + /// specified by `min` and `max`. + /// + /// + fn zremrangebylex(&self, key: K, min: M, max: N) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + M: TryInto, + M::Error: Into, + N: TryInto, + N::Error: Into, + { + into!(key); + try_into!(min, max); + async_spawn(self, |inner| async move { + commands::sorted_sets::zremrangebylex(&inner, key, min, max) + .await? + .convert() + }) + } + + /// Removes all elements in the sorted set stored at `key` with rank between `start` and `stop`. + /// + /// + fn zremrangebyrank(&self, key: K, start: i64, stop: i64) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::sorted_sets::zremrangebyrank(&inner, key, start, stop) + .await? + .convert() + }) + } + + /// Removes all elements in the sorted set stored at `key` with a score between `min` and `max`. + /// + /// + fn zremrangebyscore(&self, key: K, min: M, max: N) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + M: TryInto, + M::Error: Into, + N: TryInto, + N::Error: Into, + { + into!(key); + try_into!(min, max); + async_spawn(self, |inner| async move { + commands::sorted_sets::zremrangebyscore(&inner, key, min, max) + .await? + .convert() + }) + } + + /// Returns the specified range of elements in the sorted set stored at `key`. 
+ /// + /// + fn zrevrange(&self, key: K, start: i64, stop: i64, withscores: bool) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::sorted_sets::zrevrange(&inner, key, start, stop, withscores) + .await? + .convert() + }) + } + + /// Returns the rank of `member` in the sorted set stored at `key`, with the scores ordered from high to low. + /// + /// + fn zrevrank(&self, key: K, member: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(member); + async_spawn(self, |inner| async move { + commands::sorted_sets::zrevrank(&inner, key, member).await?.convert() + }) + } + + /// Returns the score of `member` in the sorted set at `key`. + /// + /// + fn zscore(&self, key: K, member: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(member); + async_spawn(self, |inner| async move { + commands::sorted_sets::zscore(&inner, key, member).await?.convert() + }) + } + + /// This command is similar to ZUNIONSTORE, but instead of storing the resulting sorted set, it is returned to the client. + /// + /// + fn zunion( + &self, + keys: K, + weights: W, + aggregate: Option, + withscores: bool, + ) -> AsyncResult + where + K: Into, + W: Into, + { + into!(keys, weights); + async_spawn(self, |inner| async move { + commands::sorted_sets::zunion(&inner, keys, weights, aggregate, withscores).await + }) + } + + /// Computes the union of the sorted sets given by the specified keys, and stores the result in `destination`. 
+ /// + /// + fn zunionstore( + &self, + dest: D, + keys: K, + weights: W, + aggregate: Option, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + D: Into, + K: Into, + W: Into, + { + into!(dest, keys, weights); + async_spawn(self, |inner| async move { + commands::sorted_sets::zunionstore(&inner, dest, keys, weights, aggregate) + .await? + .convert() + }) + } + + /// Returns the scores associated with the specified members in the sorted set stored at `key`. + /// + /// + fn zmscore(&self, key: K, members: V) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + V: TryInto, + V::Error: Into, + { + into!(key); + try_into!(members); + async_spawn(self, |inner| async move { + commands::sorted_sets::zmscore(&inner, key, members).await?.convert() + }) + } +} diff --git a/src/commands/interfaces/streams.rs b/src/commands/interfaces/streams.rs new file mode 100644 index 00000000..052a4842 --- /dev/null +++ b/src/commands/interfaces/streams.rs @@ -0,0 +1,729 @@ +use crate::commands; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::prelude::RedisError; +use crate::types::{ + FromRedis, FromRedisKey, MultipleIDs, MultipleKeys, MultipleOrderedPairs, MultipleStrings, RedisKey, RedisValue, + XCap, XPendingArgs, XReadResponse, XReadValue, XID, +}; +use bytes_utils::Str; +use std::convert::TryInto; +use std::hash::Hash; + +/// A trait that implements the [streams](https://redis.io/commands#stream) interface. +/// +/// **Note:** Several of the stream commands can return types with verbose type declarations. Additionally, certain commands can be parsed differently in RESP2 and RESP3 modes. +/// As a result this interface provides some utility functions that can make this easier. Functions such as [xread_map](Self::xread_map), [xreadgroup_map](Self::xreadgroup_map), +/// [xrange_values](Self::xrange_values), etc exist to make this easier on callers. 
These functions apply an additional layer of parsing logic that can make declaring response +/// types easier, as well as automatically handling the differences between RESP2 and RESP3 return value types. +pub trait StreamsInterface: ClientLike + Sized { + /// This command returns the list of consumers that belong to the `groupname` consumer group of the stream stored at `key`. + /// + /// + fn xinfo_consumers(&self, key: K, groupname: S) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + S: Into, + { + into!(key, groupname); + async_spawn(self, |inner| async move { + commands::streams::xinfo_consumers(&inner, key, groupname) + .await? + .convert() + }) + } + + /// This command returns the list of all consumers groups of the stream stored at `key`. + /// + /// + fn xinfo_groups(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::streams::xinfo_groups(&inner, key).await?.convert() + }) + } + + /// This command returns information about the stream stored at `key`. + /// + /// + fn xinfo_stream(&self, key: K, full: bool, count: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::streams::xinfo_stream(&inner, key, full, count) + .await? + .convert() + }) + } + + /// Appends the specified stream entry to the stream at the specified key. If the key does not exist, as a side effect of + /// running this command the key is created with a stream value. The creation of stream's key can be disabled with the + /// NOMKSTREAM option. 
+ /// + /// + fn xadd(&self, key: K, nomkstream: bool, cap: C, id: I, fields: F) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + I: Into, + F: TryInto, + F::Error: Into, + C: TryInto, + C::Error: Into, + { + into!(key, id); + try_into!(fields, cap); + async_spawn(self, |inner| async move { + commands::streams::xadd(&inner, key, nomkstream, cap, id, fields) + .await? + .convert() + }) + } + + /// Trims the stream by evicting older entries (entries with lower IDs) if needed. + /// + /// + fn xtrim(&self, key: K, cap: C) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + C: TryInto, + C::Error: Into, + { + into!(key); + try_into!(cap); + async_spawn(self, |inner| async move { + commands::streams::xtrim(&inner, key, cap).await?.convert() + }) + } + + /// Removes the specified entries from a stream, and returns the number of entries deleted. + /// + /// + fn xdel(&self, key: K, ids: S) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + S: Into, + { + into!(key, ids); + async_spawn(self, |inner| async move { + commands::streams::xdel(&inner, key, ids).await?.convert() + }) + } + + /// Return the stream entries matching the provided range of IDs, automatically converting to a less verbose type definition. + /// + /// + fn xrange_values( + &self, + key: K, + start: S, + end: E, + count: Option, + ) -> AsyncResult>> + where + Ri: FromRedis + Unpin + Send, + Rk: FromRedisKey + Hash + Eq + Unpin + Send, + Rv: FromRedis + Unpin + Send, + K: Into, + S: TryInto, + S::Error: Into, + E: TryInto, + E::Error: Into, + { + into!(key); + try_into!(start, end); + async_spawn(self, |inner| async move { + commands::streams::xrange(&inner, key, start, end, count) + .await? + .into_xread_value() + }) + } + + /// The command returns the stream entries matching a given range of IDs. The range is specified by a minimum + /// and maximum ID. 
All the entries having an ID between the two specified or exactly one of the two IDs specified + /// (closed interval) are returned. + /// + /// + /// + /// **See [xrange_values](Self::xrange_values) for a variation of this function that may be more useful.** + fn xrange(&self, key: K, start: S, end: E, count: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + S: TryInto, + S::Error: Into, + E: TryInto, + E::Error: Into, + { + into!(key); + try_into!(start, end); + async_spawn(self, |inner| async move { + commands::streams::xrange(&inner, key, start, end, count) + .await? + .convert() + }) + } + + /// Similar to `XRANGE`, but with the results returned in reverse order. The results will be automatically converted to a less verbose type definition. + /// + /// + fn xrevrange_values( + &self, + key: K, + end: E, + start: S, + count: Option, + ) -> AsyncResult>> + where + Ri: FromRedis + Unpin + Send, + Rk: FromRedisKey + Hash + Eq + Unpin + Send, + Rv: FromRedis + Unpin + Send, + K: Into, + S: TryInto, + S::Error: Into, + E: TryInto, + E::Error: Into, + { + into!(key); + try_into!(start, end); + async_spawn(self, |inner| async move { + commands::streams::xrevrange(&inner, key, end, start, count) + .await? + .into_xread_value() + }) + } + + /// Similar to `XRANGE`, but with the results returned in reverse order. + /// + /// + /// + /// **See the [xrevrange_values](Self::xrevrange_values) for a variation of this function that may be more useful.** + fn xrevrange(&self, key: K, end: E, start: S, count: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + S: TryInto, + S::Error: Into, + E: TryInto, + E::Error: Into, + { + into!(key); + try_into!(start, end); + async_spawn(self, |inner| async move { + commands::streams::xrevrange(&inner, key, end, start, count) + .await? + .convert() + }) + } + + /// Returns the number of entries inside a stream. 
+ /// + /// + fn xlen(&self, key: K) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + { + into!(key); + async_spawn(self, |inner| async move { + commands::streams::xlen(&inner, key).await?.convert() + }) + } + + /// Read data from one or multiple streams, only returning entries with an ID greater than the last received ID reported by the caller. + /// + /// + /// + /// The `XREAD` and `XREADGROUP` commands return values that can be interpreted differently in RESP2 and RESP3 mode. In many cases it is also easier to operate on the + /// return values of these functions as a `HashMap`, but manually declaring this type can be very verbose. This function will automatically convert the response to the + /// [most common](crate::types::XReadResponse) map representation while also handling the encoding differences between RESP2 and RESP3. + /// + /// ```rust no_run + /// # use fred::types::XReadResponse; + /// // borrowed from the tests. XREAD and XREADGROUP are very similar. + /// let result: XReadResponse = client + /// .xreadgroup_map("group1", "consumer1", None, None, false, "foo", ">") + /// .await?; + /// println!("Result: {:?}", result); + /// // Result: {"foo": [("1646240801081-0", {"count": 0}), ("1646240801082-0", {"count": 1}), ("1646240801082-1", {"count": 2})]} + /// + /// assert_eq!(result.len(), 1); + /// for (idx, (id, record)) in result.get("foo").unwrap().into_iter().enumerate() { + /// let value = record.get("count").expect("Failed to read count"); + /// assert_eq!(idx, *value); + /// } + /// ``` + // The underlying issue here isn't so much a semantic difference between RESP2 and RESP3, but rather an assumption that went into the logic behind the `FromRedis` trait. + // + // In all other Redis commands that return "maps" in RESP2 (or responses that should be interpreted as maps) a map is encoded as an array with an even number of elements + // representing `(key, value)` pairs. 
+ // + // As a result the `FromRedis` implementation for `HashMap`, `BTreeMap`, etc, took a dependency on this behavior. For example: https://redis.io/commands/hgetall#return-value + // + // ``` + // 127.0.0.1:6379> hset foo bar 0 + // (integer) 1 + // 127.0.0.1:6379> hset foo baz 1 + // (integer) 1 + // 127.0.0.1:6379> hgetall foo + // 1) "bar" + // 2) "0" + // 3) "baz" + // 4) "1" + // // now switch to RESP3 which has a specific type for maps on the wire + // 127.0.0.1:6379> hello 3 + // ... + // 127.0.0.1:6379> hgetall foo + // 1# "bar" => "0" + // 2# "baz" => "1" + // ``` + // + // However, with XREAD/XREADGROUP there's an extra array wrapper in RESP2 around both the "outer" map and "inner" map(s): + // + // ``` + // // RESP3 + // 127.0.0.1:6379> xread count 2 streams foo bar 1643479648480-0 1643479834990-0 + // 1# "foo" => 1) 1) "1643479650336-0" + // 2) 1) "count" + // 2) "3" + // 2# "bar" => 1) 1) "1643479837746-0" + // 2) 1) "count" + // 2) "5" + // 2) 1) "1643479925582-0" + // 2) 1) "count" + // 2) "6" + // + // // RESP2 + // 127.0.0.1:6379> xread count 2 streams foo bar 1643479648480-0 1643479834990-0 + // 1) 1) "foo" + // 2) 1) 1) "1643479650336-0" + // 2) 1) "count" + // 2) "3" + // 2) 1) "bar" + // 2) 1) 1) "1643479837746-0" + // 2) 1) "count" + // 2) "5" + // 2) 1) "1643479925582-0" + // 2) 1) "count" + // 2) "6" + // ``` + // + // In pseudo-Rust types: we expect `Vec` but instead get `Vec, Vec, ...>`. + // + // This left two choices: either make this specific use case (XREAD/XREADGROUP) easier with some utility functions and/or types, or try to add custom type conversion logic in `FromRedis` + // for this type of map encoding. + // + // There is a downside with the second approach outside of this use case though. It is possible for callers to write lua scripts that return pretty much anything. 
If we were to build in + // generic logic that modified response values in all cases when they matched this format then we could risk unexpected behavior for callers that just happen to write a lua script that + // returns this format. This is not likely to happen, but is still probably worth considering. + // + // Actually implementing that logic could also be pretty complicated and brittle. It's certainly possible, but seems like more trouble than it's worth when the issue only shows up with + // 2 commands out of hundreds. Additionally, we don't want to take away the ability for callers to manually declare the RESP2 structure as-is. + // + // This function (and `xreadgroup_map`) provide an easier but optional way to handle the encoding differences with the streams interface. + // + // The underlying functions that do the RESP2 vs RESP3 conversion are public for callers as well, so one could use a `BTreeMap` instead of a `HashMap` like so: + // + // ``` + // let value: BTreeMap)>> = client + // .xread::(None, None, "foo", "0") + // .await? + // .flatten_array_values(2) + // .convert()?; + // ``` + // + // Thanks for attending my TED talk. + fn xread_map( + &self, + count: Option, + block: Option, + keys: K, + ids: I, + ) -> AsyncResult> + where + Rk1: FromRedisKey + Hash + Eq + Unpin + Send, + Rk2: FromRedis + Unpin + Send, + Rk3: FromRedisKey + Hash + Eq + Unpin + Send, + Rv: FromRedis + Unpin + Send, + K: Into, + I: Into, + { + into!(keys, ids); + async_spawn(self, |inner| async move { + commands::streams::xread(&inner, count, block, keys, ids) + .await? + .into_xread_response() + }) + } + + /// Read data from one or multiple streams, only returning entries with an ID greater than the last received ID reported by the caller. 
+ /// + /// + /// + /// **See [xread_map](Self::xread_map) for more information on a variation of this function that might be more useful.** + fn xread(&self, count: Option, block: Option, keys: K, ids: I) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + I: Into, + { + into!(keys, ids); + async_spawn(self, |inner| async move { + commands::streams::xread(&inner, count, block, keys, ids) + .await? + .convert() + }) + } + + /// This command creates a new consumer group uniquely identified by `groupname` for the stream stored at `key`. + /// + /// + fn xgroup_create(&self, key: K, groupname: S, id: I, mkstream: bool) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + S: Into, + I: Into, + { + into!(key, groupname, id); + async_spawn(self, |inner| async move { + commands::streams::xgroup_create(&inner, key, groupname, id, mkstream) + .await? + .convert() + }) + } + + /// Create a consumer named `consumername` in the consumer group `groupname` of the stream that's stored at `key`. + /// + /// + fn xgroup_createconsumer(&self, key: K, groupname: G, consumername: C) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + G: Into, + C: Into, + { + into!(key, groupname, consumername); + async_spawn(self, |inner| async move { + commands::streams::xgroup_createconsumer(&inner, key, groupname, consumername) + .await? + .convert() + }) + } + + /// Delete a consumer named `consumername` in the consumer group `groupname` of the stream that's stored at `key`. + /// + /// + fn xgroup_delconsumer(&self, key: K, groupname: G, consumername: C) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + G: Into, + C: Into, + { + into!(key, groupname, consumername); + async_spawn(self, |inner| async move { + commands::streams::xgroup_delconsumer(&inner, key, groupname, consumername) + .await? + .convert() + }) + } + + /// Completely destroy a consumer group. 
+ /// + /// + fn xgroup_destroy(&self, key: K, groupname: S) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + S: Into, + { + into!(key, groupname); + async_spawn(self, |inner| async move { + commands::streams::xgroup_destroy(&inner, key, groupname) + .await? + .convert() + }) + } + + /// Set the last delivered ID for a consumer group. + /// + /// + fn xgroup_setid(&self, key: K, groupname: S, id: I) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + S: Into, + I: Into, + { + into!(key, groupname, id); + async_spawn(self, |inner| async move { + commands::streams::xgroup_setid(&inner, key, groupname, id) + .await? + .convert() + }) + } + + /// A special version of the `XREAD` command with support for consumer groups. + /// + /// Declaring proper type declarations for this command can be complicated due to the complex nature of the response values and the differences between RESP2 and RESP3. See the [xread](Self::xread) documentation for more information. + /// + /// + /// + /// The `XREAD` and `XREADGROUP` commands return values that can be interpreted differently in RESP2 and RESP3 mode. In many cases it is also easier to operate on the + /// return values of these functions as a `HashMap`, but manually declaring this type can be very verbose. This function will automatically convert the response to the + /// [most common](crate::types::XReadResponse) map representation while also handling the encoding differences between RESP2 and RESP3. + /// + /// See the [xread_map](Self::xread_map) documentation for more information. + // See the `xread_map` source docs for more information. 
+ fn xreadgroup_map( + &self, + group: G, + consumer: C, + count: Option, + block: Option, + noack: bool, + keys: K, + ids: I, + ) -> AsyncResult> + where + Rk1: FromRedisKey + Hash + Eq + Unpin + Send, + Rk2: FromRedis + Unpin + Send, + Rk3: FromRedisKey + Hash + Eq + Unpin + Send, + Rv: FromRedis + Unpin + Send, + G: Into, + C: Into, + K: Into, + I: Into, + { + into!(group, consumer, keys, ids); + async_spawn(self, |inner| async move { + commands::streams::xreadgroup(&inner, group, consumer, count, block, noack, keys, ids) + .await? + .into_xread_response() + }) + } + + /// A special version of the `XREAD` command with support for consumer groups. + /// + /// Declaring proper type declarations for this command can be complicated due to the complex nature of the response values and the differences between RESP2 and RESP3. See the [xread](Self::xread) documentation for more information. + /// + /// + /// + /// **See [xreadgroup_map](Self::xreadgroup_map) for a variation of this function that might be more useful.** + fn xreadgroup( + &self, + group: G, + consumer: C, + count: Option, + block: Option, + noack: bool, + keys: K, + ids: I, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + G: Into, + C: Into, + K: Into, + I: Into, + { + into!(group, consumer, keys, ids); + async_spawn(self, |inner| async move { + commands::streams::xreadgroup(&inner, group, consumer, count, block, noack, keys, ids) + .await? + .convert() + }) + } + + /// Remove one or more messages from the Pending Entries List (PEL) of a stream consumer group. + /// + /// + fn xack(&self, key: K, group: G, ids: I) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + G: Into, + I: Into, + { + into!(key, group, ids); + async_spawn(self, |inner| async move { + commands::streams::xack(&inner, key, group, ids).await?.convert() + }) + } + + /// A variation of [xclaim](Self::xclaim) with a less verbose return type. 
+ fn xclaim_values( + &self, + key: K, + group: G, + consumer: C, + min_idle_time: u64, + ids: I, + idle: Option, + time: Option, + retry_count: Option, + force: bool, + justid: bool, + ) -> AsyncResult>> + where + Ri: FromRedis + Unpin + Send, + Rk: FromRedisKey + Hash + Eq + Unpin + Send, + Rv: FromRedis + Unpin + Send, + K: Into, + G: Into, + C: Into, + I: Into, + { + into!(key, group, consumer, ids); + async_spawn(self, |inner| async move { + commands::streams::xclaim( + &inner, + key, + group, + consumer, + min_idle_time, + ids, + idle, + time, + retry_count, + force, + justid, + ) + .await? + .into_xread_value() + }) + } + + /// In the context of a stream consumer group, this command changes the ownership of a pending message, + /// so that the new owner is the consumer specified as the command argument. + /// + /// + /// + /// **See [xclaim_values](Self::xclaim_values) for a variation of this function that might be more useful.** + fn xclaim( + &self, + key: K, + group: G, + consumer: C, + min_idle_time: u64, + ids: I, + idle: Option, + time: Option, + retry_count: Option, + force: bool, + justid: bool, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + G: Into, + C: Into, + I: Into, + { + into!(key, group, consumer, ids); + async_spawn(self, |inner| async move { + commands::streams::xclaim( + &inner, + key, + group, + consumer, + min_idle_time, + ids, + idle, + time, + retry_count, + force, + justid, + ) + .await? + .convert() + }) + } + + /// This command transfers ownership of pending stream entries that match the specified criteria. It also converts the response type to a less verbose type declaration and handles potential differences between RESP2 and RESP3. + /// + /// + // FIXME: this type declaration wont work for Redis v7. Probably need a new FF for this... 
+ fn xautoclaim_values( + &self, + key: K, + group: G, + consumer: C, + min_idle_time: u64, + start: I, + count: Option, + justid: bool, + ) -> AsyncResult<(String, Vec>)> + where + Ri: FromRedis + Unpin + Send, + Rk: FromRedisKey + Hash + Eq + Unpin + Send, + Rv: FromRedis + Unpin + Send, + K: Into, + G: Into, + C: Into, + I: Into, + { + into!(key, group, consumer, start); + async_spawn(self, |inner| async move { + commands::streams::xautoclaim(&inner, key, group, consumer, min_idle_time, start, count, justid) + .await? + .into_xautoclaim_values() + }) + } + + /// This command transfers ownership of pending stream entries that match the specified criteria. + /// + /// + /// + /// **Note: See [xautoclaim_values](Self::xautoclaim_values) for a variation of this function that may be more useful.** + fn xautoclaim( + &self, + key: K, + group: G, + consumer: C, + min_idle_time: u64, + start: I, + count: Option, + justid: bool, + ) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + G: Into, + C: Into, + I: Into, + { + into!(key, group, consumer, start); + async_spawn(self, |inner| async move { + commands::streams::xautoclaim(&inner, key, group, consumer, min_idle_time, start, count, justid) + .await? + .convert() + }) + } + + /// Inspect the list of pending messages in a consumer group. 
+ /// + /// + fn xpending(&self, key: K, group: G, args: A) -> AsyncResult + where + R: FromRedis + Unpin + Send, + K: Into, + G: Into, + A: Into, + { + into!(key, group, args); + async_spawn(self, |inner| async move { + commands::streams::xpending(&inner, key, group, args).await?.convert() + }) + } +} diff --git a/src/commands/interfaces/strings.rs b/src/commands/interfaces/strings.rs new file mode 100644 index 00000000..e69de29b diff --git a/src/commands/interfaces/transactions.rs b/src/commands/interfaces/transactions.rs new file mode 100644 index 00000000..81fb21e9 --- /dev/null +++ b/src/commands/interfaces/transactions.rs @@ -0,0 +1,98 @@ +use crate::clients::TransactionClient; +use crate::commands; +use crate::error::{RedisError, RedisErrorKind}; +use crate::interfaces::{async_spawn, AsyncResult, ClientLike}; +use crate::modules::inner::MultiPolicy; +use crate::types::MultipleKeys; +use crate::utils; + +/// Functions that implement the [transactions](https://redis.io/commands#transactions) interface. +/// +/// See the [TransactionClient](crate::clients::TransactionClient) for more information; +pub trait TransactionInterface: ClientLike + Sized { + /// Enter a MULTI block, executing subsequent commands as a transaction. + /// + /// + /// + /// The `abort_on_error` flag indicates whether the client should automatically abort the transaction when an error is received from a command within + /// the transaction (i.e. the server responds with an error before `EXEC` is called). + /// + /// See for more information. If this flag is `false` then the caller will need to + /// `exec` or `discard` the transaction before either retrying or moving on to new commands outside the transaction. + /// + /// When used against a cluster the client will wait to send the `MULTI` command until the hash slot is known from a subsequent command. If no hash slot + /// is provided the transaction will run against a random cluster node. 
+ // TODO make sure this works with multiple commands that don't have a hash slot + fn multi(&self, abort_on_error: bool) -> AsyncResult { + async_spawn(self, |inner| async move { + if utils::is_clustered(&inner.config) { + let policy = MultiPolicy { + hash_slot: None, + abort_on_error, + sent_multi: false, + }; + + if !utils::check_and_set_none(&inner.multi_block, policy) { + return Err(RedisError::new( + RedisErrorKind::InvalidCommand, + "Client is already within a MULTI transaction.", + )); + } + + debug!("{}: Defer MULTI command until hash slot is specified.", inner.id); + Ok(TransactionClient::from(&inner)) + } else { + let policy = MultiPolicy { + hash_slot: None, + abort_on_error, + sent_multi: true, + }; + if !utils::check_and_set_none(&inner.multi_block, policy) { + return Err(RedisError::new( + RedisErrorKind::InvalidCommand, + "Client is already within a MULTI transaction.", + )); + } + + commands::server::multi(&inner) + .await + .map(|_| TransactionClient::from(&inner)) + } + }) + } + + /// Whether or not the client is currently in the middle of a MULTI transaction. + fn in_transaction(&self) -> bool { + utils::is_locked_some(&self.inner().multi_block) + } + + /// Force the client to abort any in-flight transactions. + /// + /// The `Drop` trait on the [TransactionClient](crate::clients::TransactionClient) is not async and so callers that accidentally drop the transaction + /// client associated with a MULTI block before calling EXEC or DISCARD can use this function to exit the transaction. + /// A warning log line will be emitted if the transaction client is dropped before calling EXEC or DISCARD. + fn force_discard_transaction(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { commands::server::discard(&inner).await }) + } + + /// Marks the given keys to be watched for conditional execution of a transaction. 
+ /// + /// + fn watch(&self, keys: K) -> AsyncResult<()> + where + K: Into, + { + into!(keys); + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::keys::watch(&inner, keys).await + }) + } + + /// Flushes all the previously watched keys for a transaction. + /// + /// + fn unwatch(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { commands::keys::unwatch(&inner).await }) + } +} diff --git a/src/commands/mod.rs b/src/commands/mod.rs index ee06dac1..b10e1f19 100644 --- a/src/commands/mod.rs +++ b/src/commands/mod.rs @@ -1,162 +1,4 @@ -use crate::error::RedisError; -use crate::modules::inner::RedisClientInner; -use crate::protocol::types::RedisCommandKind; -use crate::protocol::utils as protocol_utils; -use crate::types::RedisValue; -use crate::utils; -use std::sync::Arc; +mod impls; +pub mod interfaces; -pub static MATCH: &'static str = "MATCH"; -pub static COUNT: &'static str = "COUNT"; -pub static TYPE: &'static str = "TYPE"; -pub static CHANGED: &'static str = "CH"; -pub static INCR: &'static str = "INCR"; -pub static WITH_SCORES: &'static str = "WITHSCORES"; -pub static LIMIT: &'static str = "LIMIT"; -pub static AGGREGATE: &'static str = "AGGREGATE"; -pub static WEIGHTS: &'static str = "WEIGHTS"; -pub static GET: &'static str = "GET"; -pub static RESET: &'static str = "RESET"; -pub static TO: &'static str = "TO"; -pub static FORCE: &'static str = "FORCE"; -pub static ABORT: &'static str = "ABORT"; -pub static TIMEOUT: &'static str = "TIMEOUT"; -pub static LEN: &'static str = "LEN"; -pub static DB: &'static str = "DB"; -pub static REPLACE: &'static str = "REPLACE"; -pub static ID: &'static str = "ID"; -pub static ANY: &'static str = "ANY"; -pub static STORE: &'static str = "STORE"; -pub static WITH_VALUES: &'static str = "WITHVALUES"; -pub static SYNC: &'static str = "SYNC"; -pub static ASYNC: &'static str = "ASYNC"; -pub static RANK: &'static str = "RANK"; -pub static MAXLEN: &'static str = 
"MAXLEN"; -pub static REV: &'static str = "REV"; -pub static ABSTTL: &'static str = "ABSTTL"; -pub static IDLE_TIME: &'static str = "IDLETIME"; -pub static FREQ: &'static str = "FREQ"; - -/// Macro to generate a command function that takes no arguments and expects an OK response - returning `()` to the caller. -macro_rules! ok_cmd( - ($name:ident, $cmd:tt) => { - pub async fn $name(inner: &Arc) -> Result<(), RedisError> { - let frame = crate::utils::request_response(inner, || Ok((RedisCommandKind::$cmd, vec![]))).await?; - let response = crate::protocol::utils::frame_to_single_result(frame)?; - crate::protocol::utils::expect_ok(&response) - } - } -); - -/// Macro to generate a command function that takes no arguments and returns a single `RedisValue` to the caller. -macro_rules! simple_cmd( - ($name:ident, $cmd:tt, $res:ty) => { - pub async fn $name(inner: &Arc) -> Result<$res, RedisError> { - let frame = crate::utils::request_response(inner, || Ok((RedisCommandKind::$cmd, vec![]))).await?; - crate::protocol::utils::frame_to_single_result(frame) - } - } -); - -/// Macro to generate a command function that takes no arguments and returns a single `RedisValue` to the caller. -macro_rules! value_cmd( - ($name:ident, $cmd:tt) => { - simple_cmd!($name, $cmd, RedisValue); - } -); - -/// Macro to generate a command function that takes no arguments and returns a potentially nested `RedisValue` to the caller. -macro_rules! values_cmd( - ($name:ident, $cmd:tt) => { - pub async fn $name(inner: &Arc) -> Result { - let frame = crate::utils::request_response(inner, || Ok((RedisCommandKind::$cmd, vec![]))).await?; - crate::protocol::utils::frame_to_results(frame) - } - } -); - -/// A function that issues a command that only takes one argument and returns a single `RedisValue`. 
-pub async fn one_arg_value_cmd( - inner: &Arc, - kind: RedisCommandKind, - arg: RedisValue, -) -> Result { - let frame = utils::request_response(inner, move || Ok((kind, vec![arg]))).await?; - protocol_utils::frame_to_single_result(frame) -} - -/// A function that issues a command that only takes one argument and returns a potentially nested `RedisValue`. -pub async fn one_arg_values_cmd( - inner: &Arc, - kind: RedisCommandKind, - arg: RedisValue, -) -> Result { - let frame = utils::request_response(inner, move || Ok((kind, vec![arg]))).await?; - protocol_utils::frame_to_results(frame) -} - -/// A function that issues a command that only takes one argument and expects an OK response - returning `()` to the caller. -pub async fn one_arg_ok_cmd( - inner: &Arc, - kind: RedisCommandKind, - arg: RedisValue, -) -> Result<(), RedisError> { - let frame = utils::request_response(inner, move || Ok((kind, vec![arg]))).await?; - - let response = protocol_utils::frame_to_single_result(frame)?; - protocol_utils::expect_ok(&response) -} - -/// A function that issues a command that takes any number of arguments and returns a single `RedisValue` to the caller. -pub async fn args_value_cmd( - inner: &Arc, - kind: RedisCommandKind, - args: Vec, -) -> Result { - let frame = utils::request_response(inner, move || Ok((kind, args))).await?; - protocol_utils::frame_to_single_result(frame) -} - -/// A function that issues a command that takes any number of arguments and returns a potentially nested `RedisValue` to the caller. -pub async fn args_values_cmd( - inner: &Arc, - kind: RedisCommandKind, - args: Vec, -) -> Result { - let frame = utils::request_response(inner, move || Ok((kind, args))).await?; - protocol_utils::frame_to_results(frame) -} - -/// A function that issues a command that takes any number of arguments and expects an OK response - returning `()` to the caller. 
-pub async fn args_ok_cmd( - inner: &Arc, - kind: RedisCommandKind, - args: Vec, -) -> Result<(), RedisError> { - let frame = utils::request_response(inner, move || Ok((kind, args))).await?; - let response = protocol_utils::frame_to_single_result(frame)?; - protocol_utils::expect_ok(&response) -} - -pub mod acl; -pub mod client; -pub mod cluster; -pub mod config; -pub mod geo; -pub mod hashes; -pub mod hyperloglog; -pub mod keys; -pub mod lists; -pub mod lua; -pub mod memory; -pub mod pubsub; -pub mod scan; -pub mod server; -pub mod sets; -pub mod slowlog; -pub mod sorted_sets; -pub mod streams; -pub mod strings; - -#[cfg(feature = "sentinel-client")] -pub mod sentinel; +pub use impls::*; diff --git a/src/commands/sentinel.rs b/src/commands/sentinel.rs deleted file mode 100644 index 18f3aa8b..00000000 --- a/src/commands/sentinel.rs +++ /dev/null @@ -1,230 +0,0 @@ -use super::*; -use crate::error::RedisError; -use crate::modules::inner::RedisClientInner; -use crate::protocol::types::*; -use crate::protocol::utils as protocol_utils; -use crate::types::*; -use crate::utils; -use std::net::IpAddr; -use std::sync::Arc; - -pub async fn config_get(inner: &Arc, name: K) -> Result -where - K: Into, -{ - let name = name.into(); - let frame = utils::request_response(inner, move || { - Ok(( - RedisCommandKind::Sentinel, - vec!["CONFIG".into(), "GET".into(), name.into()], - )) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} - -pub async fn config_set( - inner: &Arc, - name: K, - value: RedisValue, -) -> Result -where - K: Into, -{ - let name = name.into(); - let frame = utils::request_response(inner, move || { - Ok(( - RedisCommandKind::Sentinel, - vec!["CONFIG".into(), "SET".into(), name.into(), value], - )) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} - -pub async fn ckquorum(inner: &Arc, name: N) -> Result -where - N: Into, -{ - let name = name.into(); - let frame = utils::request_response(inner, move || { - Ok((RedisCommandKind::Sentinel, 
vec!["CKQUORUM".into(), name.into()])) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} - -pub async fn flushconfig(inner: &Arc) -> Result { - args_values_cmd(inner, RedisCommandKind::Sentinel, vec!["FLUSHCONFIG".into()]).await -} - -pub async fn failover(inner: &Arc, name: N) -> Result -where - N: Into, -{ - let name = name.into(); - let frame = utils::request_response(inner, move || { - Ok((RedisCommandKind::Sentinel, vec!["FAILOVER".into(), name.into()])) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} - -pub async fn get_master_addr_by_name(inner: &Arc, name: N) -> Result -where - N: Into, -{ - let name = name.into(); - let frame = utils::request_response(inner, move || { - Ok(( - RedisCommandKind::Sentinel, - vec!["GET-MASTER-ADDR-BY-NAME".into(), name.into()], - )) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} - -pub async fn info_cache(inner: &Arc) -> Result { - args_values_cmd(inner, RedisCommandKind::Sentinel, vec!["INFO-CACHE".into()]).await -} - -pub async fn masters(inner: &Arc) -> Result { - args_values_cmd(inner, RedisCommandKind::Sentinel, vec!["MASTERS".into()]).await -} - -pub async fn master(inner: &Arc, name: N) -> Result -where - N: Into, -{ - let name = name.into(); - let frame = utils::request_response(inner, move || { - Ok((RedisCommandKind::Sentinel, vec!["MASTER".into(), name.into()])) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} - -pub async fn monitor( - inner: &Arc, - name: N, - ip: IpAddr, - port: u16, - quorum: u32, -) -> Result -where - N: Into, -{ - let (name, ip) = (name.into(), ip.to_string()); - let frame = utils::request_response(inner, move || { - Ok(( - RedisCommandKind::Sentinel, - vec!["MONITOR".into(), name.into(), ip.into(), port.into(), quorum.into()], - )) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} - -pub async fn myid(inner: &Arc) -> Result { - args_values_cmd(inner, RedisCommandKind::Sentinel, vec!["MYID".into()]).await -} - -pub async 
fn pending_scripts(inner: &Arc) -> Result { - args_values_cmd(inner, RedisCommandKind::Sentinel, vec!["PENDING-SCRIPTS".into()]).await -} - -pub async fn remove(inner: &Arc, name: N) -> Result -where - N: Into, -{ - let name = name.into(); - let frame = utils::request_response(inner, move || { - Ok((RedisCommandKind::Sentinel, vec!["REMOVE".into(), name.into()])) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} - -pub async fn replicas(inner: &Arc, name: N) -> Result -where - N: Into, -{ - let name = name.into(); - let frame = utils::request_response(inner, move || { - Ok((RedisCommandKind::Sentinel, vec!["REPLICAS".into(), name.into()])) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} - -pub async fn sentinels(inner: &Arc, name: N) -> Result -where - N: Into, -{ - let name = name.into(); - let frame = utils::request_response(inner, move || { - Ok((RedisCommandKind::Sentinel, vec!["SENTINELS".into(), name.into()])) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} - -pub async fn set(inner: &Arc, name: N, options: RedisMap) -> Result -where - N: Into, -{ - let name = name.into(); - let frame = utils::request_response(inner, move || { - let mut args = Vec::with_capacity(2 + options.len()); - args.push("SET".into()); - args.push(name.into()); - - for (key, value) in options.inner().into_iter() { - args.push(key.into()); - args.push(value); - } - Ok((RedisCommandKind::Sentinel, args)) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} - -pub async fn simulate_failure( - inner: &Arc, - kind: SentinelFailureKind, -) -> Result { - let frame = utils::request_response(inner, move || { - Ok(( - RedisCommandKind::Sentinel, - vec!["SIMULATE-FAILURE".into(), kind.to_str().into()], - )) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} - -pub async fn reset

(inner: &Arc, pattern: P) -> Result -where - P: Into, -{ - let pattern = pattern.into(); - let frame = utils::request_response(inner, move || { - Ok((RedisCommandKind::Sentinel, vec!["RESET".into(), pattern.into()])) - }) - .await?; - - protocol_utils::frame_to_results(frame) -} diff --git a/src/commands/streams.rs b/src/commands/streams.rs deleted file mode 100644 index 1dac6def..00000000 --- a/src/commands/streams.rs +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - diff --git a/src/error.rs b/src/error.rs index 9954735d..f8ac0a90 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,5 @@ use crate::protocol::types::RedisCommand; +use bytes_utils::string::Utf8Error as BytesUtf8Error; use futures::channel::oneshot::Canceled; use redis_protocol::resp2::types::Frame as Resp2Frame; use redis_protocol::types::RedisProtocolError; @@ -11,6 +12,7 @@ use std::fmt::Display; use std::io::Error as IoError; use std::num::ParseFloatError; use std::num::ParseIntError; +use std::str; use std::str::Utf8Error; use std::string::FromUtf8Error; use tokio::task::JoinError; @@ -49,6 +51,8 @@ pub enum RedisErrorKind { Sentinel, /// An error indicating a value was not found, often used when trying to cast a `nil` response from the server to a non-nullable type. NotFound, + /// An error indicating that the caller should apply backpressure and retry the command. 
+ Backpressure, } impl RedisErrorKind { @@ -69,6 +73,7 @@ impl RedisErrorKind { RedisErrorKind::Parse => "Parse Error", RedisErrorKind::Sentinel => "Sentinel Error", RedisErrorKind::NotFound => "Not Found", + RedisErrorKind::Backpressure => "Backpressure", } } } @@ -171,13 +176,19 @@ impl From for RedisError { impl From for RedisError { fn from(_: FromUtf8Error) -> Self { - RedisError::new(RedisErrorKind::Parse, "Invalid UTF8 string.") + RedisError::new(RedisErrorKind::Parse, "Invalid UTF-8 string.") } } impl From for RedisError { fn from(_: Utf8Error) -> Self { - RedisError::new(RedisErrorKind::Parse, "Invalid UTF8 string.") + RedisError::new(RedisErrorKind::Parse, "Invalid UTF-8 string.") + } +} + +impl From> for RedisError { + fn from(e: BytesUtf8Error) -> Self { + e.utf8_error().into() } } @@ -215,8 +226,8 @@ impl From for RedisError { impl From for RedisError { fn from(e: Resp2Frame) -> Self { match e { - Resp2Frame::SimpleString(s) => match s.as_ref() { - "Canceled" => RedisError::new_canceled(), + Resp2Frame::SimpleString(s) => match str::from_utf8(&s).ok() { + Some("Canceled") => RedisError::new_canceled(), _ => RedisError::new(RedisErrorKind::Unknown, "Unknown frame error."), }, _ => RedisError::new(RedisErrorKind::Unknown, "Unknown frame error."), diff --git a/src/interfaces.rs b/src/interfaces.rs new file mode 100644 index 00000000..5a8abfbb --- /dev/null +++ b/src/interfaces.rs @@ -0,0 +1,370 @@ +use crate::commands; +use crate::error::RedisError; +use crate::modules::inner::RedisClientInner; +use crate::multiplexer::{commands as multiplexer_commands, utils as multiplexer_utils}; +use crate::types::{ + ClientState, ConnectHandle, CustomCommand, FromRedis, InfoKind, ReconnectPolicy, RedisConfig, RedisValue, + ShutdownFlags, +}; +use crate::types::{PerformanceConfig, RespVersion}; +use crate::utils; +use futures::Stream; +pub use redis_protocol::resp3::types::Frame as Resp3Frame; +use std::convert::TryInto; +use std::future::Future; +use std::pin::Pin; 
+use std::sync::Arc; +use std::task::{Context, Poll}; +use tokio::sync::mpsc::unbounded_channel; +use tokio_stream::wrappers::UnboundedReceiverStream; + +// The motivation for these abstractions comes from the fact that the Redis interface is huge, and managing it all +// in one `RedisClient` struck is becoming unwieldy and unsustainable. Additionally, there are now use cases for +// supporting different client structs that implement subsets of the Redis interface where some of the underlying +// command implementations should be shared across multiple different client structs. +// +// For example, `SentinelClient` supports ACL commands and pubsub commands like a `RedisClient`, but also implements +// its own unique interface for sentinel nodes. However, it does *not* support zset commands, cluster commands, etc. +// In the future I would like the flexibility to add a `PubsubClient` for example, where it can share the same pubsub +// command interface as a `RedisClient`, but with some added features to manage subscriptions across reconnections +// automatically for callers. +// +// In an ideal world we could implement this with a trait for each section of the Redis interface. For example we +// could have an `AclInterface`, `ZsetInterface`, `HashInterface`, etc, where each interface implemented the command +// functions for that portion of the Redis interface. Since everything in this module revolves around a +// `RedisClientInner` this would be easy to do with a super trait that implemented a function to return this inner +// struct (`fn inner(&self) -> &Arc`). +// +// However, async functions are not supported in traits, which makes this much more difficult, and if we don't find +// some way to use traits to reuse portions of the Redis interface then we'll end up duplicating a lot of code, or +// relying on `Deref`, which has downsides in that it doesn't let you remove certain sections of the underlying interface. 
+// +// The abstractions implemented here are necessary because of this lack of async functions in traits. The +// `async-trait` crate exists, but it reverts to using trait objects (a la futures 0.1 pre `impl Trait`) and I'm +// not a fan of how it obfuscates your function signatures and uses boxes as often as it does. +// +// That being said, originally when I implemented this file I tried using a new tokio task for each call to `async_spawn`. This +// worked and provided a relatively clean implementation for `AsyncResult` and `AsyncInner` that didn't require relying on trait +// objects, but it dramatically reduced performance. Prior to the introduction of the new tokio task for each command the pipeline +// test benchmark could do ~2MM req/sec, but after adding the tokio task it could only do around 540k req/sec. +// +// After noticing this I went back and re-implemented this with trait objects to fix the performance issue. This ended up making the +// implementation very similar to `async-trait`, largely contradicting the paragraph above about boxes. However, the `AsyncResult` +// abstraction does have the benefit of being quite a bit more readable than the `Pin>` return type from `async-trait`, so +// I'm going to keep the `AsyncResult` abstraction in place for the time being for that reason alone. +// +// After switching from a new `tokio::spawn` call to trait objects the performance went back to about 1.85MM req/sec, which seems to be +// about as good as we can hope for without support for async functions in traits. While it would be nice to remove the overhead of the +// new trait object to get performance back to 2MM req/sec I think there's lower hanging fruit elsewhere in the code to tackle first that +// would have an even greater impact on performance. +// +// The `Send` requirement exists because the underlying future must be marked as `Send` for commands to work inside a `tokio::spawn` call. 
+// Some of the tests and examples do this for various reasons, and I don't want to prevent callers from implementing similar patterns. The +// only use case I can think of where this might be problematic is one where callers are using a custom, probably-too-clever hashing +// implementation with a `HashMap`, since all other `FromRedis` implementations are already for `Send` types. Aside from that I don't think +// the `Send` requirement should be an issue (especially since a lot of the tokio interface already requires it anyways). +// +// That being said, if anybody has issues with the `Send` requirement I'd be very interested to hear more about the use case. + +/// An enum used to represent the return value from a function that does some fallible synchronous work, +/// followed by some more fallible async logic inside a new tokio task. +enum AsyncInner { + Result(Option>), + Task(Pin> + Send + 'static>>), +} + +/// A wrapper type for return values from async functions implemented in a trait. 
+pub struct AsyncResult { + inner: AsyncInner, +} + +#[doc(hidden)] +impl From> for AsyncResult +where + T: Unpin + Send + 'static, + E: Into, +{ + fn from(value: Result) -> Self { + AsyncResult { + inner: AsyncInner::Result(Some(value.map_err(|e| e.into()))), + } + } +} + +#[doc(hidden)] +impl From> + Send + 'static>>> for AsyncResult +where + T: Unpin + Send + 'static, +{ + fn from(f: Pin> + Send + 'static>>) -> Self { + AsyncResult { + inner: AsyncInner::Task(f), + } + } +} + +impl Future for AsyncResult +where + T: Unpin + Send + 'static, +{ + type Output = Result; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.get_mut().inner { + AsyncInner::Result(ref mut output) => { + if let Some(value) = output.take() { + Poll::Ready(value) + } else { + error!("Tried calling poll on an AsyncResult::Result more than once."); + Poll::Ready(Err(RedisError::new_canceled())) + } + } + AsyncInner::Task(ref mut fut) => Pin::new(fut).poll(cx), + } + } +} + +/// A wrapper type for async stream return values from functions implemented in a trait. +/// +/// This is used to work around the lack of `impl Trait` support in trait functions. +pub struct AsyncStream { + inner: UnboundedReceiverStream, +} + +impl Stream for AsyncStream +where + T: Unpin + Send + 'static, +{ + type Item = T; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.get_mut().inner).poll_next(cx) + } +} + +#[doc(hidden)] +impl From> for AsyncStream +where + T: Unpin + Send + 'static, +{ + fn from(rx: UnboundedReceiverStream) -> Self { + AsyncStream { inner: rx } + } +} + +/// Run a function in the context of an async block, returning an `AsyncResult` that wraps a trait object. 
+pub(crate) fn async_spawn(client: &C, func: F) -> AsyncResult +where + C: ClientLike, + Fut: Future> + Send + 'static, + F: FnOnce(Arc) -> Fut, + T: Unpin + Send + 'static, +{ + // this is unfortunate but necessary without async functions in traits + let inner = client.inner().clone(); + AsyncResult { + inner: AsyncInner::Task(Box::pin(func(inner))), + } +} + +/// Run a function and wrap the result in an `AsyncResult` trait object. +#[cfg(feature = "subscriber-client")] +pub(crate) fn wrap_async(func: F) -> AsyncResult +where + Fut: Future> + Send + 'static, + F: FnOnce() -> Fut, + T: Unpin + Send + 'static, +{ + AsyncResult { + inner: AsyncInner::Task(Box::pin(func())), + } +} + +/// Any Redis client that implements any part of the Redis interface. +pub trait ClientLike: Unpin + Send + Sync + Sized { + #[doc(hidden)] + fn inner(&self) -> &Arc; + + /// The unique ID identifying this client and underlying connections. + /// + /// All connections created by this client will use `CLIENT SETNAME` with this value. + fn id(&self) -> &Arc { + &self.inner().id + } + + /// Read the config used to initialize the client. + fn client_config(&self) -> RedisConfig { + utils::read_locked(&self.inner().config) + } + + /// Read the reconnect policy used to initialize the client. + fn client_reconnect_policy(&self) -> Option { + self.inner().policy.read().clone() + } + + /// Read the RESP version used by the client when communicating with the server. + fn protocol_version(&self) -> RespVersion { + self.inner().resp_version.as_ref().load().as_ref().clone() + } + + /// Whether or not the client has a reconnection policy. + fn has_reconnect_policy(&self) -> bool { + self.inner().policy.read().is_some() + } + + /// Whether or not the client will automatically pipeline commands. + fn is_pipelined(&self) -> bool { + self.inner().is_pipelined() + } + + /// Update the internal [PerformanceConfig](crate::types::PerformanceConfig) in place with new values. 
+ fn update_perf_config(&self, config: PerformanceConfig) { + self.inner().perf_config.update(&config); + + let mut guard = self.inner().config.write(); + guard.performance = config; + } + + /// Read the state of the underlying connection(s). + /// + /// If running against a cluster the underlying state will reflect the state of the least healthy connection, if any. + fn state(&self) -> ClientState { + self.inner().state.read().clone() + } + + /// Whether or not the client has an active connection to the server(s). + fn is_connected(&self) -> bool { + *self.inner().state.read() == ClientState::Connected + } + + /// Connect to the Redis server with an optional reconnection policy. + /// + /// This function returns a `JoinHandle` to a task that drives the connection. It will not resolve + /// until the connection closes, and if a reconnection policy with unlimited attempts + /// is provided then the `JoinHandle` will run forever, or until `QUIT` is called. + /// + /// **Note:** See the [RedisConfig](crate::types::RedisConfig) documentation for more information on how the `policy` is applied to new connections. + fn connect(&self, policy: Option) -> ConnectHandle { + let inner = self.inner().clone(); + + tokio::spawn(async move { + let result = multiplexer_commands::init(&inner, policy).await; + if let Err(ref e) = result { + multiplexer_utils::emit_connect_error(&inner, e); + } + utils::set_client_state(&inner.state, ClientState::Disconnected); + result + }) + } + + /// Wait for the client to connect to the server, or return an error if the initial connection cannot be established. + /// If the client is already connected this future will resolve immediately. + /// + /// This can be used with `on_reconnect` to separate initialization logic that needs to occur only on the first connection attempt vs subsequent attempts. 
+ fn wait_for_connect(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { utils::wait_for_connect(&inner).await }) + } + + /// Listen for protocol and connection errors. This stream can be used to more intelligently handle errors that may + /// not appear in the request-response cycle, and so cannot be handled by response futures. + /// + /// This function does not need to be called again if the connection closes. + fn on_error(&self) -> AsyncStream { + let (tx, rx) = unbounded_channel(); + self.inner().error_tx.write().push_back(tx); + + UnboundedReceiverStream::new(rx).into() + } + + /// Close the connection to the Redis server. The returned future resolves when the command has been written to the socket, + /// not when the connection has been fully closed. Some time after this future resolves the future returned by [connect](Self::connect) + /// will resolve which indicates that the connection has been fully closed. + /// + /// This function will also close all error, pubsub message, and reconnection event streams. + fn quit(&self) -> AsyncResult<()> { + async_spawn(self, |inner| async move { commands::server::quit(&inner).await }) + } + + /// Shut down the server and quit the client. + /// + /// + fn shutdown(&self, flags: Option) -> AsyncResult<()> { + async_spawn(self, |inner| async move { + utils::disallow_during_transaction(&inner)?; + commands::server::shutdown(&inner, flags).await + }) + } + + /// Ping the Redis server. + /// + /// + fn ping(&self) -> AsyncResult<()> { + async_spawn( + self, + |inner| async move { commands::server::ping(&inner).await?.convert() }, + ) + } + + /// Read info about the server. + /// + /// + fn info(&self, section: Option) -> AsyncResult + where + R: FromRedis + Unpin + Send, + { + async_spawn(self, |inner| async move { + commands::server::info(&inner, section).await?.convert() + }) + } + + /// Run a custom command that is not yet supported via another interface on this client. 
This is most useful when interacting with third party modules or extensions. + /// + /// This interface makes some assumptions about the nature of the provided command: + /// * For commands comprised of multiple command strings they must be separated by a space. + /// * The command string will be sent to the server exactly as written. + /// * Arguments will be sent in the order provided. + /// * When used against a cluster the caller must provide the correct hash slot to identify the cluster + /// node that should receive the command. If one is not provided the command will be sent to a random node + /// in the cluster. + /// + /// Callers should use the re-exported [redis_keyslot](crate::util::redis_keyslot) function to hash the command's key, if necessary. + /// + /// This interface should be used with caution as it may break the automatic pipeline features in the client if command flags are not properly configured. + fn custom(&self, cmd: CustomCommand, args: Vec) -> AsyncResult + where + R: FromRedis + Unpin + Send, + T: TryInto, + T::Error: Into, + { + let args = atry!(utils::try_into_vec(args)); + async_spawn(self, |inner| async move { + commands::server::custom(&inner, cmd, args).await?.convert() + }) + } + + /// Run a custom command similar to [custom](Self::custom), but return the response frame directly without any parsing. + /// + /// Note: RESP2 frames from the server are automatically converted to the RESP3 format when parsed by the client. 
+ fn custom_raw(&self, cmd: CustomCommand, args: Vec) -> AsyncResult + where + T: TryInto, + T::Error: Into, + { + let args = atry!(utils::try_into_vec(args)); + async_spawn(self, |inner| async move { + commands::server::custom_raw(&inner, cmd, args).await + }) + } +} + +pub use crate::commands::interfaces::{ + acl::AclInterface, client::ClientInterface, cluster::ClusterInterface, config::ConfigInterface, geo::GeoInterface, + hashes::HashesInterface, hyperloglog::HyperloglogInterface, keys::KeysInterface, lists::ListInterface, + lua::LuaInterface, memory::MemoryInterface, metrics::MetricsInterface, pubsub::PubsubInterface, + server::AuthInterface, server::HeartbeatInterface, server::ServerInterface, sets::SetsInterface, + slowlog::SlowlogInterface, sorted_sets::SortedSetsInterface, streams::StreamsInterface, + transactions::TransactionInterface, +}; + +#[cfg(feature = "sentinel-client")] +pub use crate::commands::interfaces::sentinel::SentinelInterface; diff --git a/src/lib.rs b/src/lib.rs index 24da1651..894bb9b6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -45,12 +45,16 @@ //! //! See the [github repository](https://github.com/aembke/fred.rs) for more examples. //! +pub extern crate bytes; +pub extern crate bytes_utils; +#[cfg(feature = "serde-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde-json")))] +pub extern crate serde_json; + #[macro_use] extern crate async_trait; #[macro_use] extern crate log; -#[cfg(feature = "index-map")] -extern crate indexmap; #[cfg(feature = "enable-tls")] extern crate native_tls; #[cfg(feature = "enable-tls")] @@ -70,24 +74,47 @@ mod protocol; mod trace; mod utils; -/// The primary interface for communicating with the Redis server. -pub mod client; +/// Redis client implementations. +pub mod clients; /// Error structs returned by Redis commands. pub mod error; +/// Traits that implement portions of the Redis interface. +pub mod interfaces; /// An interface to run the `MONITOR` command. 
#[cfg(feature = "monitor")] #[cfg_attr(docsrs, doc(cfg(feature = "monitor")))] pub mod monitor; +/// The structs and enums used by the Redis client. +pub mod types; /// An interface for interacting directly with sentinel nodes. -#[cfg(feature = "sentinel-client")] -#[cfg_attr(docsrs, doc(cfg(feature = "sentinel-client")))] -pub mod sentinel; -pub use crate::modules::{globals, pool, types}; +/// Utility functions used by the client that may also be useful to callers. +pub mod util { + pub use crate::s; + pub use crate::utils::f64_to_redis_string; + pub use crate::utils::redis_string_to_f64; + pub use crate::utils::{static_bytes, static_str}; + pub use redis_protocol::redis_keyslot; + + /// Calculate the SHA1 hash output as a hex string. This is provided for clients that use the Lua interface to manage their own script caches. + pub fn sha1_hash(input: &str) -> String { + use sha1::Digest; + + let mut hasher = sha1::Sha1::new(); + hasher.update(input.as_bytes()); + format!("{:x}", hasher.finalize()) + } +} + +pub use crate::modules::{globals, pool}; -/// Convenience module to `use` a `RedisClient`, `RedisError`, and any argument types. +/// Convenience module to import a `RedisClient`, all possible interfaces, error types, and common argument types or return value types. pub mod prelude { - pub use crate::client::RedisClient; - pub use crate::error::RedisError; - pub use crate::types::*; + pub use crate::clients::RedisClient; + pub use crate::error::{RedisError, RedisErrorKind}; + pub use crate::interfaces::*; + pub use crate::types::{ + Blocking, Expiration, FromRedis, ReconnectPolicy, RedisConfig, RedisValue, RedisValueKind, ServerConfig, + SetOptions, + }; } diff --git a/src/macros.rs b/src/macros.rs index c6ab1ddd..26520c69 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -61,3 +61,89 @@ macro_rules! fspan ( crate::trace::Span {} } ); + +/// Async try! for `AsyncResult`. This is rarely used on its own, but rather as a part of try_into!. +macro_rules! 
atry ( + ($expr:expr) => { + match $expr { + Ok(val) => val, + Err(e) => return crate::interfaces::AsyncResult::from(Err(e)) + } + } +); + +macro_rules! static_str( + ($name:ident, $val:expr) => { + lazy_static::lazy_static! { + pub(crate) static ref $name: Str = { + crate::utils::static_str($val) + }; + } + } +); + +/// Public macro to create a `Str` from a static str slice without copying. +/// +/// ```rust no_run +/// // use "foo" without copying or parsing the underlying data. this uses the `Bytes::from_static` interface under the hood. +/// let _ = client.get(s!("foo")).await?; +/// ``` +#[macro_export] +macro_rules! s( + ($val:expr) => { + fred::util::static_str($val) + } +); + +/// Public macro to create a `Bytes` from a static byte slice without copying. +/// +/// ```rust no_run +/// // use "bar" without copying or parsing the underlying data. this uses the `Bytes::from_static` interface under the hood. +/// let _ = client.set(s!("foo"), b!(b"bar")).await?; +/// ``` +#[macro_export] +macro_rules! b( + ($val:expr) => { + fred::util::static_bytes($val) + } +); + +macro_rules! static_val( + ($val:expr) => { + RedisValue::from_static_str($val) + } +); + +macro_rules! into ( + ($val:ident) => (let $val = $val.into();); + ($v1:ident, $v2:ident) => ( + let ($v1, $v2) = ($v1.into(), $v2.into()); + ); + ($v1:ident, $v2:ident, $v3:ident) => ( + let ($v1, $v2, $v3) = ($v1.into(), $v2.into(), $v3.into()); + ); + ($v1:ident, $v2:ident, $v3:ident, $v4:ident) => ( + let ($v1, $v2, $v3, $v4) = ($v1.into(), $v2.into(), $v3.into(), $v4.into()); + ); + ($v1:ident, $v2:ident, $v3:ident, $v4:ident, $v5:ident) => ( + let ($v1, $v2, $v3, $v4, $v5) = ($v1.into(), $v2.into(), $v3.into(), $v4.into(), $v5.into()); + ); + // add to this as needed +); + +macro_rules! 
try_into ( + ($val:ident) => (let $val = atry!(to!($val));); + ($v1:ident, $v2:ident) => ( + let ($v1, $v2) = (atry!(to!($v1)), atry!(to!($v2))); + ); + ($v1:ident, $v2:ident, $v3:ident) => ( + let ($v1, $v2, $v3) = (atry!(to!($v1)), atry!(to!($v2)), atry!(to!($v3))); + ); + ($v1:ident, $v2:ident, $v3:ident, $v4:ident) => ( + let ($v1, $v2, $v3, $v4) = (atry!(to!($v1)), atry!(to!($v2)), atry!(to!($v3)), atry!(to!($v4))); + ); + ($v1:ident, $v2:ident, $v3:ident, $v4:ident, $v5:ident) => ( + let ($v1, $v2, $v3, $v4, $v5) = (atry!(to!($v1)), atry!(to!($v2)), atry!(to!($v3)), atry!(to!($v4)), atry!(to!($v5))); + ); + // add to this as needed +); diff --git a/src/modules/backchannel.rs b/src/modules/backchannel.rs index b7cdc769..06e0d182 100644 --- a/src/modules/backchannel.rs +++ b/src/modules/backchannel.rs @@ -3,10 +3,10 @@ use crate::modules::inner::RedisClientInner; use crate::multiplexer::ConnectionIDs; use crate::protocol::connection; use crate::protocol::connection::{FramedTcp, FramedTls, RedisTransport}; -use crate::protocol::types::RedisCommand; +use crate::protocol::types::{ProtocolFrame, RedisCommand}; use crate::protocol::utils as protocol_utils; use crate::types::Resolve; -use redis_protocol::resp2::types::Frame as ProtocolFrame; +use redis_protocol::resp3::types::Frame as Resp3Frame; use std::sync::Arc; async fn create_transport( @@ -30,17 +30,17 @@ async fn create_transport( fn map_tcp_response( result: Result<(ProtocolFrame, FramedTcp), (RedisError, FramedTcp)>, -) -> Result<(ProtocolFrame, RedisTransport), (RedisError, RedisTransport)> { +) -> Result<(Resp3Frame, RedisTransport), (RedisError, RedisTransport)> { result - .map(|(f, t)| (f, RedisTransport::Tcp(t))) + .map(|(f, t)| (f.into_resp3(), RedisTransport::Tcp(t))) .map_err(|(e, t)| (e, RedisTransport::Tcp(t))) } fn map_tls_response( result: Result<(ProtocolFrame, FramedTls), (RedisError, FramedTls)>, -) -> Result<(ProtocolFrame, RedisTransport), (RedisError, RedisTransport)> { +) -> 
Result<(Resp3Frame, RedisTransport), (RedisError, RedisTransport)> { result - .map(|(f, t)| (f, RedisTransport::Tls(t))) + .map(|(f, t)| (f.into_resp3(), RedisTransport::Tls(t))) .map_err(|(e, t)| (e, RedisTransport::Tls(t))) } @@ -105,8 +105,9 @@ impl Backchannel { host: &str, port: u16, uses_tls: bool, + use_blocked: bool, ) -> Result<(RedisTransport, Option>, bool), RedisError> { - if self.has_blocked_transport() { + if self.has_blocked_transport() && use_blocked { if let Some((transport, server)) = self.transport.take() { Ok((transport, Some(server), false)) } else { @@ -132,18 +133,22 @@ impl Backchannel { inner: &Arc, server: &Arc, command: RedisCommand, - ) -> Result { + use_blocked: bool, + ) -> Result { + let is_resp3 = inner.is_resp3(); let uses_tls = inner.config.read().uses_tls(); let (host, port) = protocol_utils::server_to_parts(server)?; - let (transport, _server, try_once) = self.take_or_create_transport(inner, host, port, uses_tls).await?; + let (transport, _server, try_once) = self + .take_or_create_transport(inner, host, port, uses_tls, use_blocked) + .await?; let server = _server.unwrap_or(server.clone()); let result = match transport { RedisTransport::Tcp(transport) => { - map_tcp_response(connection::request_response_safe(transport, &command).await) + map_tcp_response(connection::request_response_safe(transport, &command, is_resp3).await) } RedisTransport::Tls(transport) => { - map_tls_response(connection::request_response_safe(transport, &command).await) + map_tls_response(connection::request_response_safe(transport, &command, is_resp3).await) } }; @@ -159,13 +164,15 @@ impl Backchannel { Err(e) } else { // need to avoid async recursion - let (transport, _, _) = self.take_or_create_transport(inner, host, port, uses_tls).await?; + let (transport, _, _) = self + .take_or_create_transport(inner, host, port, uses_tls, use_blocked) + .await?; let result = match transport { RedisTransport::Tcp(transport) => { - 
map_tcp_response(connection::request_response_safe(transport, &command).await) + map_tcp_response(connection::request_response_safe(transport, &command, is_resp3).await) } RedisTransport::Tls(transport) => { - map_tls_response(connection::request_response_safe(transport, &command).await) + map_tls_response(connection::request_response_safe(transport, &command, is_resp3).await) } }; diff --git a/src/modules/globals.rs b/src/modules/globals.rs index 3042e7bc..e30ad47e 100644 --- a/src/modules/globals.rs +++ b/src/modules/globals.rs @@ -52,18 +52,6 @@ impl ReconnectError { /// Mutable globals that can be configured by the caller. pub(crate) struct Globals { - /// Max number of times a command can be written to the wire before returning an error. - pub(crate) max_command_attempts: Arc, - /// Max number of in-flight commands per socket before the caller receives backpressure. - pub(crate) backpressure_count: Arc, - /// Number of frames that can be fed into a socket before the socket must be flushed. - pub(crate) feed_count: Arc, - /// Minimum amount of time to wait when applying backpressure. - pub(crate) min_backpressure_time_ms: Arc, - /// Amount of time to wait before re-caching the cluster state when a MOVED or ASK error is detected. - pub(crate) cluster_error_cache_delay: Arc, - /// The default timeout to apply to commands, in ms. A value of 0 means no timeout. - pub(crate) default_command_timeout: Arc, /// The default timeout to apply to connections to sentinel nodes. 
pub(crate) sentinel_connection_timeout_ms: Arc, #[cfg(feature = "blocking-encoding")] @@ -77,12 +65,6 @@ pub(crate) struct Globals { impl Default for Globals { fn default() -> Self { Globals { - max_command_attempts: Arc::new(AtomicUsize::new(3)), - backpressure_count: Arc::new(AtomicUsize::new(5000)), - feed_count: Arc::new(AtomicUsize::new(500)), - min_backpressure_time_ms: Arc::new(AtomicUsize::new(100)), - cluster_error_cache_delay: Arc::new(AtomicUsize::new(100)), - default_command_timeout: Arc::new(AtomicUsize::new(0)), sentinel_connection_timeout_ms: Arc::new(AtomicUsize::new(200)), #[cfg(feature = "blocking-encoding")] blocking_encode_threshold: Arc::new(AtomicUsize::new(500_000)), @@ -101,30 +83,6 @@ impl Globals { read_atomic(&self.sentinel_connection_timeout_ms) } - pub fn max_command_attempts(&self) -> usize { - read_atomic(&self.max_command_attempts) - } - - pub fn backpressure_count(&self) -> usize { - read_atomic(&self.backpressure_count) - } - - pub fn feed_count(&self) -> usize { - read_atomic(&self.feed_count) - } - - pub fn min_backpressure_time_ms(&self) -> usize { - read_atomic(&self.min_backpressure_time_ms) - } - - pub fn cluster_error_cache_delay(&self) -> usize { - read_atomic(&self.cluster_error_cache_delay) - } - - pub fn default_command_timeout(&self) -> usize { - read_atomic(&self.default_command_timeout) - } - #[cfg(feature = "blocking-encoding")] pub fn blocking_encode_threshold(&self) -> usize { read_atomic(&self.blocking_encode_threshold) @@ -156,93 +114,6 @@ pub fn set_custom_reconnect_errors(prefixes: Vec) { *guard = prefixes; } -/// Read the default timeout applied to commands, in ms. A value of 0 means no timeout. -/// -/// Default: 0 -pub fn get_default_command_timeout() -> usize { - read_atomic(&globals().default_command_timeout) -} - -/// See [get_default_command_timeout] for more information. 
-pub fn set_default_command_timeout(val: usize) -> usize { - set_atomic(&globals().default_command_timeout, val) -} - -/// Read the amount of time the client will wait before caching the new layout of the cluster slots when it detects a MOVED or ASK error. -/// -/// Default: 100 ms -pub fn get_cluster_error_cache_delay_ms() -> usize { - read_atomic(&globals().cluster_error_cache_delay) -} - -/// See [get_cluster_error_cache_delay_ms] for more information. -pub fn set_cluster_error_cache_delay_ms(val: usize) -> usize { - set_atomic(&globals().cluster_error_cache_delay, val) -} - -/// Read the max number of attempts the client will make when attempting to write a command to the socket. -/// -/// A connection closing while a command is in flight can result in a command being written multiple times. -/// -/// Callers can set this to 1 to disable automatic command retry. -/// -/// Default: 3 -pub fn get_max_command_attempts() -> usize { - read_atomic(&globals().max_command_attempts) -} - -/// See [get_max_command_attempts] for more information. -pub fn set_max_command_attempts(val: usize) -> usize { - set_atomic(&globals().max_command_attempts, val) -} - -/// Read the maximum allowed number of in-flight commands per connection before backpressure is put on callers. -/// -/// Backpressure is handled automatically by the client without returning errors. This setting can drastically affect performance. -/// See [get_min_backpressure_time_ms] for more information on how to configure the backpressure `sleep` duration. -/// -/// The client will automatically [pipeline](https://redis.io/topics/pipelining) all commands to the server, and this setting can -/// be used to effectively disable pipelining by setting this to `1`. 
However, If the caller wants to avoid pipelining commands it's recommended -/// to use the `disable_pipeline` flag on the [connect](crate::client::RedisClient::connect) function instead since backpressure is probabilistic -/// while the `no_pipeline` flag is not. -/// -/// Default: 5000 -pub fn get_backpressure_count() -> usize { - read_atomic(&globals().backpressure_count) -} - -/// See [get_backpressure_count] for more information. -pub fn set_backpressure_count(val: usize) -> usize { - set_atomic(&globals().backpressure_count, val) -} - -/// Read the number of protocol frames that can be written to a socket with [feed](https://docs.rs/futures/0.3.14/futures/sink/trait.SinkExt.html#method.feed) -/// before the socket will be flushed once by using [send](https://docs.rs/futures/0.3.14/futures/sink/trait.SinkExt.html#method.send). -/// -/// Default: 500 -pub fn get_feed_count() -> usize { - read_atomic(&globals().feed_count) -} - -/// See [get_feed_count] for more information. -pub fn set_feed_count(val: usize) -> usize { - set_atomic(&globals().feed_count, val) -} - -/// Read the minimum amount of time the client will wait between writing commands when applying backpressure. -/// -/// Backpressure is only applied while the number of in-flight commands exceeds the [get_backpressure_count] value. -/// -/// Default: 100 ms -pub fn get_min_backpressure_time_ms() -> usize { - read_atomic(&globals().min_backpressure_time_ms) -} - -/// See [get_min_backpressure_time_ms] for more information. -pub fn set_min_backpressure_time_ms(val: usize) -> usize { - set_atomic(&globals().min_backpressure_time_ms, val) -} - /// The minimum size, in bytes, of frames that should be encoded or decoded with a blocking task. /// /// See [block_in_place](https://docs.rs/tokio/1.9.0/tokio/task/fn.block_in_place.html) for more information. 
diff --git a/src/modules/inner.rs b/src/modules/inner.rs index 71e42833..f5c4dcc1 100644 --- a/src/modules/inner.rs +++ b/src/modules/inner.rs @@ -1,4 +1,4 @@ -use crate::client::RedisClient; +use crate::clients::RedisClient; use crate::error::*; use crate::modules::backchannel::Backchannel; use crate::multiplexer::SentCommand; @@ -6,6 +6,7 @@ use crate::protocol::types::DefaultResolver; use crate::protocol::types::RedisCommand; use crate::types::*; use crate::utils; +use arc_swap::ArcSwap; use parking_lot::RwLock; use std::collections::VecDeque; use std::sync::atomic::AtomicUsize; @@ -58,9 +59,116 @@ impl MultiPolicy { } } +/// A lock-free internal representation of the performance config options from the `RedisConfig`. +#[derive(Debug)] +pub struct InternalPerfConfig { + pipeline: ArcSwap, + max_command_attempts: Arc, + default_command_timeout: Arc, + max_feed_count: Arc, + cluster_cache_update_delay_ms: Arc, + disable_auto_backpressure: ArcSwap, + disable_backpressure_scaling: ArcSwap, + min_sleep_duration: Arc, + max_in_flight_commands: Arc, +} + +impl<'a> From<&'a RedisConfig> for InternalPerfConfig { + fn from(config: &'a RedisConfig) -> Self { + InternalPerfConfig { + pipeline: ArcSwap::from(Arc::new(config.performance.pipeline)), + max_command_attempts: Arc::new(AtomicUsize::new(config.performance.max_command_attempts as usize)), + default_command_timeout: Arc::new(AtomicUsize::new(config.performance.default_command_timeout_ms as usize)), + max_feed_count: Arc::new(AtomicUsize::new(config.performance.max_feed_count as usize)), + cluster_cache_update_delay_ms: Arc::new(AtomicUsize::new( + config.performance.cluster_cache_update_delay_ms as usize, + )), + disable_auto_backpressure: ArcSwap::from(Arc::new(config.performance.backpressure.disable_auto_backpressure)), + disable_backpressure_scaling: ArcSwap::from(Arc::new( + config.performance.backpressure.disable_backpressure_scaling, + )), + min_sleep_duration: Arc::new(AtomicUsize::new( + 
config.performance.backpressure.min_sleep_duration_ms as usize, + )), + max_in_flight_commands: Arc::new(AtomicUsize::new( + config.performance.backpressure.max_in_flight_commands as usize, + )), + } + } +} + +impl InternalPerfConfig { + pub fn pipeline(&self) -> bool { + *self.pipeline.load().as_ref() + } + + pub fn max_command_attempts(&self) -> usize { + utils::read_atomic(&self.max_command_attempts) + } + + pub fn default_command_timeout(&self) -> usize { + utils::read_atomic(&self.default_command_timeout) + } + + pub fn max_feed_count(&self) -> usize { + utils::read_atomic(&self.max_feed_count) + } + + pub fn cluster_cache_update_delay_ms(&self) -> usize { + utils::read_atomic(&self.cluster_cache_update_delay_ms) + } + + pub fn disable_auto_backpressure(&self) -> bool { + *self.disable_auto_backpressure.load().as_ref() + } + + pub fn disable_backpressure_scaling(&self) -> bool { + *self.disable_backpressure_scaling.load().as_ref() + } + + pub fn min_sleep_duration(&self) -> usize { + utils::read_atomic(&self.min_sleep_duration) + } + + pub fn max_in_flight_commands(&self) -> usize { + utils::read_atomic(&self.max_in_flight_commands) + } + + pub fn update(&self, config: &PerformanceConfig) { + self.pipeline.store(Arc::new(config.pipeline)); + self + .disable_backpressure_scaling + .store(Arc::new(config.backpressure.disable_backpressure_scaling)); + self + .disable_auto_backpressure + .store(Arc::new(config.backpressure.disable_auto_backpressure)); + + utils::set_atomic(&self.max_command_attempts, config.max_command_attempts as usize); + utils::set_atomic( + &self.default_command_timeout, + config.default_command_timeout_ms as usize, + ); + utils::set_atomic(&self.max_feed_count, config.max_feed_count as usize); + utils::set_atomic( + &self.cluster_cache_update_delay_ms, + config.cluster_cache_update_delay_ms as usize, + ); + utils::set_atomic( + &self.min_sleep_duration, + config.backpressure.min_sleep_duration_ms as usize, + ); + utils::set_atomic( + 
&self.max_in_flight_commands, + config.backpressure.max_in_flight_commands as usize, + ); + } +} + pub struct RedisClientInner { /// The client ID as seen by the server. pub id: Arc, + /// The RESP version used by the underlying connections. + pub resp_version: Arc>, /// The response policy to apply when the client is in a MULTI block. pub multi_block: RwLock>, /// The state of the underlying connection. @@ -81,6 +189,8 @@ pub struct RedisClientInner { pub keyspace_tx: RwLock>>, /// An mpsc sender for reconnection events to `on_reconnect` streams. pub reconnect_tx: RwLock>>, + /// An mpsc sender for cluster change notifications. + pub cluster_change_tx: RwLock>>>, /// MPSC senders for `on_connect` futures. pub connect_tx: RwLock>>>, /// A join handle for the task that sleeps waiting to reconnect. @@ -99,6 +209,8 @@ pub struct RedisClientInner { pub backchannel: Arc>, /// The server host/port resolved from the sentinel nodes, if known. pub sentinel_primary: RwLock>>, + /// The internal representation of the performance config options from the `RedisConfig`. + pub perf_config: Arc, /// Command latency metrics. 
#[cfg(feature = "metrics")] @@ -120,6 +232,8 @@ impl RedisClientInner { let id = Arc::new(format!("fred-{}", utils::random_string(10))); let resolver = DefaultResolver::new(&id); let (command_tx, command_rx) = unbounded_channel(); + let version = config.version.clone(); + let perf_config = InternalPerfConfig::from(&config); Arc::new(RedisClientInner { #[cfg(feature = "metrics")] @@ -131,6 +245,8 @@ impl RedisClientInner { #[cfg(feature = "metrics")] res_size_stats: Arc::new(RwLock::new(MovingStats::default())), + resp_version: Arc::new(ArcSwap::from(Arc::new(version))), + perf_config: Arc::new(perf_config), config: RwLock::new(config), policy: RwLock::new(None), state: RwLock::new(ClientState::Disconnected), @@ -138,6 +254,7 @@ impl RedisClientInner { message_tx: RwLock::new(VecDeque::new()), keyspace_tx: RwLock::new(VecDeque::new()), reconnect_tx: RwLock::new(VecDeque::new()), + cluster_change_tx: RwLock::new(VecDeque::new()), connect_tx: RwLock::new(VecDeque::new()), reconnect_sleep_jh: RwLock::new(None), cmd_buffer_len: Arc::new(AtomicUsize::new(0)), @@ -155,7 +272,7 @@ impl RedisClientInner { } pub fn is_pipelined(&self) -> bool { - self.config.read().pipeline + self.perf_config.pipeline() } pub fn log_client_name_fn(&self, level: log::Level, func: F) @@ -203,4 +320,17 @@ impl RedisClientInner { let mut guard = self.command_rx.write(); *guard = Some(rx); } + + pub fn is_resp3(&self) -> bool { + *self.resp_version.as_ref().load().as_ref() == RespVersion::RESP3 + } + + pub fn switch_protocol_versions(&self, version: RespVersion) { + self.resp_version.as_ref().store(Arc::new(version)) + } + + pub fn reset_protocol_version(&self) { + let version = self.config.read().version.clone(); + self.resp_version.as_ref().store(Arc::new(version)); + } } diff --git a/src/modules/metrics.rs b/src/modules/metrics.rs index 5715708b..01f51cfb 100644 --- a/src/modules/metrics.rs +++ b/src/modules/metrics.rs @@ -4,6 +4,8 @@ use std::cmp; /// Stats describing a distribution of 
samples. +/// +/// Time units are in milliseconds, data size units are in bytes. pub struct Stats { pub min: i64, pub max: i64, @@ -14,6 +16,8 @@ pub struct Stats { } /// Struct for tracking moving stats about network latency or request/response sizes. +/// +/// Time units are in milliseconds, data size units are in bytes. pub struct MovingStats { pub min: i64, pub max: i64, diff --git a/src/modules/mod.rs b/src/modules/mod.rs index 65419d1b..6c5ffabc 100644 --- a/src/modules/mod.rs +++ b/src/modules/mod.rs @@ -6,5 +6,3 @@ pub mod metrics; /// Client pooling structs. pub mod pool; pub mod response; -/// The structs and enums used by the Redis client. -pub mod types; diff --git a/src/modules/pool.rs b/src/modules/pool.rs index d05f358b..b2740b79 100644 --- a/src/modules/pool.rs +++ b/src/modules/pool.rs @@ -1,261 +1,35 @@ -use crate::client::RedisClient; +use crate::clients::RedisClient; use crate::error::{RedisError, RedisErrorKind}; +use crate::interfaces::ClientLike; use crate::types::{ConnectHandle, ReconnectPolicy, RedisConfig}; use crate::utils; use futures::future::{join_all, try_join_all}; -use parking_lot::RwLock; use std::fmt; use std::ops::Deref; use std::sync::atomic::AtomicUsize; use std::sync::Arc; -use tokio::sync::Mutex as AsyncMutex; -/// The inner state used by a `DynamicRedisPool`. -pub(crate) struct DynamicPoolInner { - clients: RwLock>, - last: Arc, - config: RedisConfig, - policy: Option, - connect_guard: AsyncMutex<()>, -} - -/// A struct to pool multiple Redis clients together into one interface that will round-robin requests among clients, -/// preferring clients with an active connection if specified. -/// -/// This module supports scaling operations at runtime but cannot use the `Deref` trait to dereference the pool to a client. 
-#[derive(Clone)] -pub struct DynamicRedisPool { - inner: Arc, -} - -impl fmt::Display for DynamicRedisPool { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "[Dynamic Redis Pool]") - } -} - -impl<'a> From<&'a DynamicRedisPool> for RedisClient { - fn from(p: &'a DynamicRedisPool) -> RedisClient { - p.next() - } -} - -impl DynamicRedisPool { - /// Create a new pool without connecting to the server. - pub fn new(config: RedisConfig, policy: Option, size: usize, max_size: usize) -> DynamicRedisPool { - let mut clients = Vec::with_capacity(max_size); - for _ in 0..size { - clients.push(RedisClient::new(config.clone())); - } - - DynamicRedisPool { - inner: Arc::new(DynamicPoolInner { - clients: RwLock::new(clients), - last: Arc::new(AtomicUsize::new(0)), - connect_guard: AsyncMutex::new(()), - config, - policy, - }), - } - } - - /// Connect each client in the pool to the server, returning the task driving the connection for each client. - /// - /// The caller is responsible for calling `wait_for_connect` or any `on_*` functions on each client. - pub async fn connect(&self) -> Vec { - let _guard = self.inner.connect_guard.lock().await; - - self - .inner - .clients - .read() - .iter() - .map(|c| c.connect(self.inner.policy.clone())) - .collect() - } - - /// Wait for all the clients to connect to the server. - pub async fn wait_for_connect(&self) -> Result<(), RedisError> { - debug!("Connecting via dynamic pool..."); - let clients = self.clients(); - let futures = clients.iter().map(|client| client.wait_for_connect()); - let _ = try_join_all(futures).await?; - - Ok(()) - } - - /// Read the client that should run the next command. 
- #[cfg(feature = "pool-prefer-active")] - pub fn next(&self) -> RedisClient { - let clients_guard = self.inner.clients.read(); - let clients_ref = &*clients_guard; - - if clients_ref.is_empty() { - warn!("Attempted to read a client from an empty redis pool."); - return RedisClient::new(self.inner.config.clone()); - } - - let num_clients = clients_ref.len(); - let mut last_idx = utils::incr_atomic(&self.inner.last) % num_clients; - - for _ in 0..num_clients { - let client = &clients_ref[last_idx]; - if client.is_connected() { - return client.clone(); - } - last_idx = (last_idx + 1) % num_clients; - } - - clients_ref[last_idx].clone() - } - - /// Read the client that should run the next command. - #[cfg(not(feature = "pool-prefer-active"))] - pub fn next(&self) -> RedisClient { - let clients_guard = self.inner.clients.read(); - let clients_ref = &*clients_guard; - - if clients_ref.is_empty() { - warn!("Attempted to read a client from an empty redis pool."); - RedisClient::new(self.inner.config.clone()) - } else { - clients_ref[utils::incr_atomic(&self.inner.last) % clients_ref.len()].clone() - } - } - - /// Read the client that should run the next command, creating a new client first if the pool is empty. - pub async fn next_connect(&self, wait_for_connect: bool) -> RedisClient { - if has_clients(&self.inner.clients) { - self.next() - } else { - let (client, _) = self.scale_up().await; - if wait_for_connect { - let _ = client.wait_for_connect().await; - } - client - } - } - - /// Read the client that ran the last command. 
- pub fn last(&self) -> RedisClient { - let clients_guard = self.inner.clients.read(); - let clients_ref = &*clients_guard; - - if clients_ref.is_empty() { - warn!("Attempted to read a client from an empty redis pool."); - RedisClient::new(self.inner.config.clone()) - } else { - clients_ref[utils::read_atomic(&self.inner.last) % clients_ref.len()].clone() - } - } - - /// Return a list of clients that have an active, healthy connection to the server. - pub fn connected_clients(&self) -> Vec { - self - .inner - .clients - .read() - .iter() - .filter_map(|client| { - if client.is_connected() { - Some(client.clone()) - } else { - None - } - }) - .collect() - } - - /// Return a list of clients that are not in a healthy, connected state. - pub fn disconnected_clients(&self) -> Vec { - self - .inner - .clients - .read() - .iter() - .filter_map(|client| { - if client.is_connected() { - None - } else { - Some(client.clone()) - } - }) - .collect() - } - - /// An iterator over the inner client array. - pub fn clients(&self) -> Vec { - self.inner.clients.read().iter().map(|c| c.clone()).collect() - } - - /// Read the number of clients in the pool. - pub fn size(&self) -> usize { - self.inner.clients.read().len() - } - - /// Call `QUIT` on each client in the pool. - pub async fn quit_pool(&self) { - let clients = self.clients(); - let futures = clients.iter().map(|c| c.quit()); - let _ = join_all(futures).await; - } - - /// Add a client to the pool, using the same config and reconnection policy from the initial connections. - pub async fn scale_up(&self) -> (RedisClient, ConnectHandle) { - let _guard = self.inner.connect_guard.lock().await; - - let client = RedisClient::new(self.inner.config.clone()); - let connection = client.connect(self.inner.policy.clone()); - self.inner.clients.write().push(client.clone()); - - (client, connection) - } - - /// Remove a client from the pool, optionally using `QUIT` after removing it from the pool. 
- /// - /// A new, uninitialized client will be used if the caller tries to read a client from an empty pool. - pub async fn scale_down(&self, quit: bool) -> Option { - let _guard = self.inner.connect_guard.lock().await; - - if let Some(client) = self.inner.clients.write().pop() { - if quit { - let _ = client.quit().await; - } - Some(client) - } else { - None - } - } -} - -fn has_clients(clients: &RwLock>) -> bool { - clients.read().len() > 0 -} - -/// The inner state used by a `StaticRedisPool`. +/// The inner state used by a `RedisPool`. #[derive(Clone)] -pub(crate) struct StaticRedisPoolInner { +pub(crate) struct RedisPoolInner { clients: Vec, last: Arc, } /// A struct to pool multiple Redis clients together into one interface that will round-robin requests among clients, /// preferring clients with an active connection if specified. -/// -/// This module does not support modifications to the pool after initialization, but does automatically dereference -/// to the next client that should run a command. 
#[derive(Clone)] -pub struct StaticRedisPool { - inner: Arc, +pub struct RedisPool { + inner: Arc, } -impl fmt::Display for StaticRedisPool { +impl fmt::Display for RedisPool { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "[Static Redis Pool]") + write!(f, "[Redis Pool]") } } -impl Deref for StaticRedisPool { +impl Deref for RedisPool { type Target = RedisClient; fn deref(&self) -> &Self::Target { @@ -263,21 +37,21 @@ impl Deref for StaticRedisPool { } } -impl<'a> From<&'a StaticRedisPool> for &'a RedisClient { - fn from(p: &'a StaticRedisPool) -> &'a RedisClient { +impl<'a> From<&'a RedisPool> for &'a RedisClient { + fn from(p: &'a RedisPool) -> &'a RedisClient { p.next() } } -impl<'a> From<&'a StaticRedisPool> for RedisClient { - fn from(p: &'a StaticRedisPool) -> RedisClient { +impl<'a> From<&'a RedisPool> for RedisClient { + fn from(p: &'a RedisPool) -> RedisClient { p.next().clone() } } -impl StaticRedisPool { +impl RedisPool { /// Create a new pool without connecting to the server. 
- pub fn new(config: RedisConfig, size: usize) -> Result { + pub fn new(config: RedisConfig, size: usize) -> Result { if size > 0 { let mut clients = Vec::with_capacity(size); for _ in 0..size { @@ -285,8 +59,8 @@ impl StaticRedisPool { } let last = Arc::new(AtomicUsize::new(0)); - Ok(StaticRedisPool { - inner: Arc::new(StaticRedisPoolInner { clients, last }), + Ok(RedisPool { + inner: Arc::new(RedisPoolInner { clients, last }), }) } else { Err(RedisError::new(RedisErrorKind::Config, "Pool cannot be empty.")) diff --git a/src/modules/response.rs b/src/modules/response.rs index 0815d353..aba61355 100644 --- a/src/modules/response.rs +++ b/src/modules/response.rs @@ -1,12 +1,29 @@ use crate::error::{RedisError, RedisErrorKind}; -use crate::types::{RedisValue, QUEUED}; +use crate::types::{RedisKey, RedisValue, QUEUED}; +use bytes::Bytes; +use bytes_utils::Str; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::hash::{BuildHasher, Hash}; -use std::str::FromStr; + +#[cfg(feature = "serde-json")] +use crate::utils; +#[cfg(feature = "serde-json")] +use serde_json::{Map, Value}; + +macro_rules! debug_type( + ($($arg:tt)*) => { + cfg_if::cfg_if! { + if #[cfg(feature="network-logs")] { + log::trace!($($arg)*); + } + } + } +); macro_rules! to_signed_number( ($t:ty, $v:expr) => { match $v { + RedisValue::Double(f) => Ok(f as $t), RedisValue::Integer(i) => Ok(i as $t), RedisValue::String(s) => s.parse::<$t>().map_err(|e| e.into()), RedisValue::Null => Err(RedisError::new(RedisErrorKind::NotFound, "Cannot convert nil to number.")), @@ -28,6 +45,11 @@ macro_rules! to_signed_number( macro_rules! to_unsigned_number( ($t:ty, $v:expr) => { match $v { + RedisValue::Double(f) => if f.is_sign_negative() { + Err(RedisError::new_parse("Cannot convert from negative number.")) + }else{ + Ok(f as $t) + }, RedisValue::Integer(i) => if i < 0 { Err(RedisError::new_parse("Cannot convert from negative number.")) }else{ @@ -56,7 +78,7 @@ macro_rules! 
to_unsigned_number( macro_rules! impl_signed_number ( ($t:ty) => { - impl RedisResponse for $t { + impl FromRedis for $t { fn from_value(value: RedisValue) -> Result<$t, RedisError> { to_signed_number!($t, value) } @@ -66,7 +88,7 @@ macro_rules! impl_signed_number ( macro_rules! impl_unsigned_number ( ($t:ty) => { - impl RedisResponse for $t { + impl FromRedis for $t { fn from_value(value: RedisValue) -> Result<$t, RedisError> { to_unsigned_number!($t, value) } @@ -74,8 +96,10 @@ macro_rules! impl_unsigned_number ( } ); -/// A trait used to [convert](crate::modules::types::RedisValue::convert) various forms of [RedisValue](crate::modules::types::RedisValue) into different types. -pub trait RedisResponse: Sized { +/// A trait used to convert various forms of [RedisValue](crate::types::RedisValue) into different types. +/// +/// See the [convert](crate::types::RedisValue::convert) documentation for important information regarding performance considerations and examples. +pub trait FromRedis: Sized { fn from_value(value: RedisValue) -> Result; #[doc(hidden)] @@ -85,18 +109,22 @@ pub trait RedisResponse: Sized { #[doc(hidden)] // FIXME if/when specialization is stable - fn from_bytes(_: Vec) -> Option> { + fn from_owned_bytes(_: Vec) -> Option> { None } + #[doc(hidden)] + fn is_tuple() -> bool { + false + } } -impl RedisResponse for RedisValue { +impl FromRedis for RedisValue { fn from_value(value: RedisValue) -> Result { Ok(value) } } -impl RedisResponse for () { +impl FromRedis for () { fn from_value(_: RedisValue) -> Result { Ok(()) } @@ -109,13 +137,13 @@ impl_signed_number!(i64); impl_signed_number!(i128); impl_signed_number!(isize); -impl RedisResponse for u8 { +impl FromRedis for u8 { fn from_value(value: RedisValue) -> Result { to_unsigned_number!(u8, value) } - fn from_bytes(v: Vec) -> Option> { - Some(v) + fn from_owned_bytes(d: Vec) -> Option> { + Some(d) } } @@ -125,8 +153,9 @@ impl_unsigned_number!(u64); impl_unsigned_number!(u128); 
impl_unsigned_number!(usize); -impl RedisResponse for String { +impl FromRedis for String { fn from_value(value: RedisValue) -> Result { + debug_type!("FromRedis(String): {:?}", value); if value.is_null() { Err(RedisError::new( RedisErrorKind::NotFound, @@ -140,8 +169,25 @@ impl RedisResponse for String { } } -impl RedisResponse for f64 { +impl FromRedis for Str { + fn from_value(value: RedisValue) -> Result { + debug_type!("FromRedis(Str): {:?}", value); + if value.is_null() { + Err(RedisError::new( + RedisErrorKind::NotFound, + "Cannot convert nil response to string.", + )) + } else { + value + .into_bytes_str() + .ok_or(RedisError::new_parse("Could not convert to string.")) + } + } +} + +impl FromRedis for f64 { fn from_value(value: RedisValue) -> Result { + debug_type!("FromRedis(f64): {:?}", value); if value.is_null() { Err(RedisError::new( RedisErrorKind::NotFound, @@ -155,8 +201,9 @@ impl RedisResponse for f64 { } } -impl RedisResponse for f32 { +impl FromRedis for f32 { fn from_value(value: RedisValue) -> Result { + debug_type!("FromRedis(f32): {:?}", value); if value.is_null() { Err(RedisError::new( RedisErrorKind::NotFound, @@ -171,8 +218,9 @@ impl RedisResponse for f32 { } } -impl RedisResponse for bool { +impl FromRedis for bool { fn from_value(value: RedisValue) -> Result { + debug_type!("FromRedis(bool): {:?}", value); if value.is_null() { Err(RedisError::new( RedisErrorKind::NotFound, @@ -186,11 +234,12 @@ impl RedisResponse for bool { } } -impl RedisResponse for Option +impl FromRedis for Option where - T: RedisResponse, + T: FromRedis, { fn from_value(value: RedisValue) -> Result, RedisError> { + debug_type!("FromRedis(Option): {:?}", value); if value.is_null() { Ok(None) } else { @@ -199,17 +248,30 @@ where } } -impl RedisResponse for Vec +impl FromRedis for Bytes { + fn from_value(value: RedisValue) -> Result { + debug_type!("FromRedis(Bytes): {:?}", value); + value + .into_bytes() + .ok_or(RedisError::new_parse("Cannot parse into bytes.")) + } 
+} + +impl FromRedis for Vec where - T: RedisResponse, + T: FromRedis, { fn from_value(value: RedisValue) -> Result, RedisError> { + debug_type!("FromRedis(Vec): {:?}", value); match value { - RedisValue::Bytes(bytes) => T::from_bytes(bytes).ok_or(RedisError::new_parse("Cannot convert from bytes")), + RedisValue::Bytes(bytes) => { + T::from_owned_bytes(bytes.to_vec()).ok_or(RedisError::new_parse("Cannot convert from bytes")) + } RedisValue::String(string) => { // hacky way to check if T is bytes without consuming `string` - if T::from_bytes(vec![]).is_some() { - T::from_bytes(string.into_bytes()).ok_or(RedisError::new_parse("Could not convert string to bytes.")) + if T::from_owned_bytes(vec![]).is_some() { + T::from_owned_bytes(string.into_inner().to_vec()) + .ok_or(RedisError::new_parse("Could not convert string to bytes.")) } else { Ok(vec![T::from_value(RedisValue::String(string))?]) } @@ -220,26 +282,39 @@ where let out = Vec::with_capacity(map.len() * 2); map.inner().into_iter().fold(Ok(out), |out, (key, value)| { out.and_then(|mut out| { - out.push(T::from_value(RedisValue::String(key))?); - out.push(T::from_value(value)?); + if T::is_tuple() { + // try to convert to a 2-element tuple since that's a common use case from `HGETALL`, etc + out.push(T::from_value(RedisValue::Array(vec![key.into(), value]))?); + } else { + out.push(T::from_value(key.into())?); + out.push(T::from_value(value)?); + } + Ok(out) }) }) } RedisValue::Null => Ok(vec![]), RedisValue::Integer(i) => Ok(vec![T::from_value(RedisValue::Integer(i))?]), - RedisValue::Queued => Ok(vec![T::from_value(RedisValue::String(QUEUED.into()))?]), + RedisValue::Double(f) => Ok(vec![T::from_value(RedisValue::Double(f))?]), + RedisValue::Boolean(b) => Ok(vec![T::from_value(RedisValue::Boolean(b))?]), + RedisValue::Queued => Ok(vec![T::from_value(RedisValue::from_static_str(QUEUED))?]), } } } -impl RedisResponse for HashMap +impl FromRedis for HashMap where - K: FromStr + Eq + Hash, - V: RedisResponse, + K: 
FromRedisKey + Eq + Hash, + V: FromRedis, S: BuildHasher + Default, { fn from_value(value: RedisValue) -> Result { + debug_type!("FromRedis(HashMap): {:?}", value); + if value.is_null() { + return Err(RedisError::new(RedisErrorKind::NotFound, "Cannot convert nil to map.")); + } + let as_map = if value.is_array() || value.is_map() { value .into_map() @@ -251,33 +326,29 @@ where as_map .inner() .into_iter() - .map(|(k, v)| { - Ok(( - k.parse::() - .map_err(|_| RedisError::new_parse("Cannot convert key."))?, - V::from_value(v)?, - )) - }) + .map(|(k, v)| Ok((K::from_key(k)?, V::from_value(v)?))) .collect() } } -impl RedisResponse for HashSet +impl FromRedis for HashSet where - V: RedisResponse + Hash + Eq, + V: FromRedis + Hash + Eq, S: BuildHasher + Default, { fn from_value(value: RedisValue) -> Result { + debug_type!("FromRedis(HashSet): {:?}", value); value.into_array().into_iter().map(|v| V::from_value(v)).collect() } } -impl RedisResponse for BTreeMap +impl FromRedis for BTreeMap where - K: FromStr + Ord, - V: RedisResponse, + K: FromRedisKey + Ord, + V: FromRedis, { fn from_value(value: RedisValue) -> Result { + debug_type!("FromRedis(BTreeMap): {:?}", value); let as_map = if value.is_array() || value.is_map() { value .into_map() @@ -289,37 +360,37 @@ where as_map .inner() .into_iter() - .map(|(k, v)| { - Ok(( - k.parse::() - .map_err(|_| RedisError::new_parse("Cannot convert key."))?, - V::from_value(v)?, - )) - }) + .map(|(k, v)| Ok((K::from_key(k)?, V::from_value(v)?))) .collect() } } -impl RedisResponse for BTreeSet +impl FromRedis for BTreeSet where - V: RedisResponse + Ord, + V: FromRedis + Ord, { fn from_value(value: RedisValue) -> Result { + debug_type!("FromRedis(BTreeSet): {:?}", value); value.into_array().into_iter().map(|v| V::from_value(v)).collect() } } // adapted from mitsuhiko -// this seems much better than making callers deal with hlists -macro_rules! impl_redis_response_tuple { +macro_rules! 
impl_from_redis_tuple { () => (); ($($name:ident,)+) => ( - impl<$($name: RedisResponse),*> RedisResponse for ($($name,)*) { + #[doc(hidden)] + impl<$($name: FromRedis),*> FromRedis for ($($name,)*) { + fn is_tuple() -> bool { + true + } + #[allow(non_snake_case, unused_variables)] fn from_value(v: RedisValue) -> Result<($($name,)*), RedisError> { if let RedisValue::Array(mut values) = v { let mut n = 0; $(let $name = (); n += 1;)* + debug_type!("FromRedis({}-tuple): {:?}", n, values); if values.len() != n { return Err(RedisError::new_parse("Invalid tuple dimension.")); } @@ -340,6 +411,7 @@ macro_rules! impl_redis_response_tuple { fn from_values(mut values: Vec) -> Result, RedisError> { let mut n = 0; $(let $name = (); n += 1;)* + debug_type!("FromRedis({}-tuple): {:?}", n, values); if values.len() % n != 0 { return Err(RedisError::new_parse("Invalid tuple dimension.")) } @@ -356,15 +428,156 @@ macro_rules! impl_redis_response_tuple { Ok(out) } } - impl_redis_response_peel!($($name,)*); + impl_from_redis_peel!($($name,)*); ) } -macro_rules! impl_redis_response_peel { - ($name:ident, $($other:ident,)*) => (impl_redis_response_tuple!($($other,)*);) +macro_rules! impl_from_redis_peel { + ($name:ident, $($other:ident,)*) => (impl_from_redis_tuple!($($other,)*);) } -impl_redis_response_tuple! { T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, } +impl_from_redis_tuple! { T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, } + +macro_rules! 
impl_from_str_from_redis_key ( + ($t:ty) => { + impl FromRedisKey for $t { + fn from_key(value: RedisKey) -> Result<$t, RedisError> { + value + .as_str() + .and_then(|k| k.parse::<$t>().ok()) + .ok_or(RedisError::new_parse("Cannot parse key from bytes.")) + } + } + } +); + +#[cfg(feature = "serde-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde-json")))] +impl FromRedis for Value { + fn from_value(value: RedisValue) -> Result { + let value = match value { + RedisValue::Null => Value::Null, + RedisValue::Queued => QUEUED.into(), + RedisValue::String(s) => { + if let Some(parsed) = utils::parse_nested_json(&s) { + parsed + } else { + s.to_string().into() + } + } + RedisValue::Bytes(b) => String::from_utf8(b.to_vec())?.into(), + RedisValue::Integer(i) => i.into(), + RedisValue::Double(f) => f.into(), + RedisValue::Boolean(b) => b.into(), + RedisValue::Array(v) => { + let mut out = Vec::with_capacity(v.len()); + for value in v.into_iter() { + out.push(Self::from_value(value)?); + } + Value::Array(out) + } + RedisValue::Map(v) => { + let mut out = Map::with_capacity(v.len()); + for (key, value) in v.inner().into_iter() { + let key = key + .into_string() + .ok_or(RedisError::new_parse("Cannot convert key to string."))?; + let value = Self::from_value(value)?; + + out.insert(key, value); + } + Value::Object(out) + } + }; + + Ok(value) + } +} + +impl FromRedis for RedisKey { + fn from_value(value: RedisValue) -> Result { + let key = match value { + RedisValue::Boolean(b) => b.into(), + RedisValue::Integer(i) => i.into(), + RedisValue::Double(f) => f.into(), + RedisValue::String(s) => s.into(), + RedisValue::Bytes(b) => b.into(), + RedisValue::Queued => RedisKey::from_static_str(QUEUED), + RedisValue::Map(_) | RedisValue::Array(_) => { + return Err(RedisError::new_parse("Cannot convert aggregate type to key.")) + } + RedisValue::Null => return Err(RedisError::new(RedisErrorKind::NotFound, "Cannot convert nil to key.")), + }; + + Ok(key) + } +} + +/// A trait used to 
convert [RedisKey](crate::types::RedisKey) values to various types. +/// +/// See the [convert](crate::types::RedisKey::convert) documentation for more information. +pub trait FromRedisKey: Sized { + fn from_key(value: RedisKey) -> Result; +} + +impl_from_str_from_redis_key!(u8); +impl_from_str_from_redis_key!(u16); +impl_from_str_from_redis_key!(u32); +impl_from_str_from_redis_key!(u64); +impl_from_str_from_redis_key!(u128); +impl_from_str_from_redis_key!(usize); +impl_from_str_from_redis_key!(i8); +impl_from_str_from_redis_key!(i16); +impl_from_str_from_redis_key!(i32); +impl_from_str_from_redis_key!(i64); +impl_from_str_from_redis_key!(i128); +impl_from_str_from_redis_key!(isize); +impl_from_str_from_redis_key!(f32); +impl_from_str_from_redis_key!(f64); + +impl FromRedisKey for () { + fn from_key(_: RedisKey) -> Result { + Ok(()) + } +} + +impl FromRedisKey for RedisValue { + fn from_key(value: RedisKey) -> Result { + Ok(RedisValue::Bytes(value.into_bytes())) + } +} + +impl FromRedisKey for RedisKey { + fn from_key(value: RedisKey) -> Result { + Ok(value) + } +} + +impl FromRedisKey for String { + fn from_key(value: RedisKey) -> Result { + value + .into_string() + .ok_or(RedisError::new_parse("Cannot parse key as string.")) + } +} + +impl FromRedisKey for Str { + fn from_key(value: RedisKey) -> Result { + Ok(Str::from_inner(value.into_bytes())?) 
+ } +} + +impl FromRedisKey for Vec { + fn from_key(value: RedisKey) -> Result { + Ok(value.into_bytes().to_vec()) + } +} + +impl FromRedisKey for Bytes { + fn from_key(value: RedisKey) -> Result { + Ok(value.into_bytes()) + } +} #[cfg(test)] mod tests { @@ -493,7 +706,7 @@ mod tests { #[test] fn should_convert_bytes() { - let _foo: Vec = RedisValue::Bytes("foo".as_bytes().to_vec()).convert().unwrap(); + let _foo: Vec = RedisValue::Bytes("foo".as_bytes().to_vec().into()).convert().unwrap(); assert_eq!(_foo, "foo".as_bytes().to_vec()); let _foo: Vec = RedisValue::String("foo".into()).convert().unwrap(); assert_eq!(_foo, "foo".as_bytes().to_vec()); diff --git a/src/modules/types.rs b/src/modules/types.rs deleted file mode 100644 index 3129a099..00000000 --- a/src/modules/types.rs +++ /dev/null @@ -1,3014 +0,0 @@ -use crate::client::RedisClient; -use crate::error::*; -use crate::modules::inner::RedisClientInner; -use crate::protocol::connection::OK; -use crate::protocol::types::{KeyScanInner, RedisCommand, RedisCommandKind, ValueScanInner}; -use crate::protocol::utils as protocol_utils; -use crate::utils; -pub use redis_protocol::resp2::types::Frame; -use redis_protocol::resp2::types::NULL; -use std::borrow::Cow; -use std::cmp; -use std::collections::{BTreeMap, HashMap, VecDeque}; -use std::convert::{TryFrom, TryInto}; -use std::fmt; -use std::hash::Hash; -use std::hash::Hasher; -use std::iter::FromIterator; -use std::mem; -use std::net::SocketAddr; -use std::ops::{Deref, DerefMut}; -use std::str; -use std::sync::Arc; -use tokio::task::JoinHandle; - -pub use crate::modules::response::RedisResponse; -pub use crate::protocol::tls::TlsConfig; -pub use crate::protocol::types::{ClusterKeyCache, SlotRange}; - -#[cfg(feature = "metrics")] -#[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] -pub use crate::modules::metrics::Stats; - -#[cfg(feature = "index-map")] -use indexmap::{IndexMap, IndexSet}; -#[cfg(not(feature = "index-map"))] -use std::collections::HashSet; - 
-pub(crate) static QUEUED: &'static str = "QUEUED"; -pub(crate) static NIL: &'static str = "nil"; - -/// The ANY flag used on certain GEO commands. -pub type Any = bool; -/// The result from any of the `connect` functions showing the error that closed the connection, if any. -pub type ConnectHandle = JoinHandle>; -/// A tuple of `(offset, count)` values for commands that allow paging through results. -pub type Limit = (i64, i64); - -/// Arguments passed to the SHUTDOWN command. -/// -/// -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ShutdownFlags { - Save, - NoSave, -} - -impl ShutdownFlags { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - ShutdownFlags::Save => "SAVE", - ShutdownFlags::NoSave => "NOSAVE", - } - } -} - -/// An event on the publish-subscribe interface describing a keyspace notification. -/// -/// -#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct KeyspaceEvent { - pub db: u8, - pub operation: String, - pub key: String, -} - -/// Aggregate options for the [zinterstore](https://redis.io/commands/zinterstore) (and related) commands. -pub enum AggregateOptions { - Sum, - Min, - Max, -} - -impl AggregateOptions { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - AggregateOptions::Sum => "SUM", - AggregateOptions::Min => "MIN", - AggregateOptions::Max => "MAX", - } - } -} - -/// The types of values supported by the [type](https://redis.io/commands/type) command. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ScanType { - Set, - String, - ZSet, - List, - Hash, - Stream, -} - -impl ScanType { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - ScanType::Set => "set", - ScanType::String => "string", - ScanType::List => "list", - ScanType::ZSet => "zset", - ScanType::Hash => "hash", - ScanType::Stream => "stream", - } - } -} - -/// The result of a SCAN operation. 
-pub struct ScanResult { - pub(crate) results: Option>, - pub(crate) inner: Arc, - pub(crate) args: Vec, - pub(crate) scan_state: KeyScanInner, - pub(crate) can_continue: bool, -} - -impl ScanResult { - /// Read the current cursor from the SCAN operation. - pub fn cursor(&self) -> &str { - &self.scan_state.cursor - } - - /// Whether or not the scan call will continue returning results. If `false` this will be the last result set returned on the stream. - /// - /// Calling `next` when this returns `false` will return `Ok(())`, so this does not need to be checked on each result. - pub fn has_more(&self) -> bool { - self.can_continue - } - - /// A reference to the results of the SCAN operation. - pub fn results(&self) -> &Option> { - &self.results - } - - /// Take ownership over the results of the SCAN operation. Calls to `results` or `take_results` will return `None` afterwards. - pub fn take_results(&mut self) -> Option> { - self.results.take() - } - - /// Move on to the next page of results from the SCAN operation. If no more results are available this may close the stream. - /// - /// **This must be called to continue scanning the keyspace.** Results are not automatically scanned in the background since - /// this could cause the buffer backing the stream to grow too large very quickly. This interface provides a mechanism - /// for throttling the throughput of the SCAN call. If this struct is dropped without calling this function the stream will - /// close without an error. - /// - /// If this function returns an error the scan call cannot continue as the client has been closed, or some other fatal error - /// has occurred. If this happens the error will appear in the stream from the original SCAN call. 
- pub fn next(self) -> Result<(), RedisError> { - if !self.can_continue { - return Ok(()); - } - - let kind = RedisCommandKind::Scan(self.scan_state); - let cmd = RedisCommand::new(kind, self.args, None); - utils::send_command(&self.inner, cmd) - } - - /// A lightweight function to create a Redis client from the SCAN result. - /// - /// To continue scanning the caller should call `next` on this struct. Calling `scan` again on the client will initiate a new SCAN call starting with a cursor of 0. - pub fn create_client(&self) -> RedisClient { - RedisClient { - inner: self.inner.clone(), - } - } -} - -/// The result of a HSCAN operation. -pub struct HScanResult { - pub(crate) results: Option, - pub(crate) inner: Arc, - pub(crate) args: Vec, - pub(crate) scan_state: ValueScanInner, - pub(crate) can_continue: bool, -} - -impl HScanResult { - /// Read the current cursor from the SCAN operation. - pub fn cursor(&self) -> &str { - &self.scan_state.cursor - } - - /// Whether or not the scan call will continue returning results. If `false` this will be the last result set returned on the stream. - /// - /// Calling `next` when this returns `false` will return `Ok(())`, so this does not need to be checked on each result. - pub fn has_more(&self) -> bool { - self.can_continue - } - - /// A reference to the results of the HSCAN operation. - pub fn results(&self) -> &Option { - &self.results - } - - /// Take ownership over the results of the HSCAN operation. Calls to `results` or `take_results` will return `None` afterwards. - pub fn take_results(&mut self) -> Option { - self.results.take() - } - - /// Move on to the next page of results from the HSCAN operation. If no more results are available this may close the stream. - /// - /// **This must be called to continue scanning the keyspace.** Results are not automatically scanned in the background since - /// this could cause the buffer backing the stream to grow too large very quickly. 
This interface provides a mechanism - /// for throttling the throughput of the SCAN call. If this struct is dropped without calling this function the stream will - /// close without an error. - /// - /// If this function returns an error the scan call cannot continue as the client has been closed, or some other fatal error - /// has occurred. If this happens the error will appear in the stream from the original SCAN call. - pub fn next(self) -> Result<(), RedisError> { - if !self.can_continue { - return Ok(()); - } - - let kind = RedisCommandKind::Hscan(self.scan_state); - let cmd = RedisCommand::new(kind, self.args, None); - utils::send_command(&self.inner, cmd) - } - - /// A lightweight function to create a Redis client from the HSCAN result. - /// - /// To continue scanning the caller should call `next` on this struct. Calling `hscan` again on the client will initiate a new HSCAN call starting with a cursor of 0. - pub fn create_client(&self) -> RedisClient { - RedisClient { - inner: self.inner.clone(), - } - } -} - -/// The result of a SCAN operation. -pub struct SScanResult { - pub(crate) results: Option>, - pub(crate) inner: Arc, - pub(crate) args: Vec, - pub(crate) scan_state: ValueScanInner, - pub(crate) can_continue: bool, -} - -impl SScanResult { - /// Read the current cursor from the SSCAN operation. - pub fn cursor(&self) -> &str { - &self.scan_state.cursor - } - - /// Whether or not the scan call will continue returning results. If `false` this will be the last result set returned on the stream. - /// - /// Calling `next` when this returns `false` will return `Ok(())`, so this does not need to be checked on each result. - pub fn has_more(&self) -> bool { - self.can_continue - } - - /// A reference to the results of the SCAN operation. - pub fn results(&self) -> &Option> { - &self.results - } - - /// Take ownership over the results of the SSCAN operation. Calls to `results` or `take_results` will return `None` afterwards. 
- pub fn take_results(&mut self) -> Option> { - self.results.take() - } - - /// Move on to the next page of results from the SSCAN operation. If no more results are available this may close the stream. - /// - /// **This must be called to continue scanning the keyspace.** Results are not automatically scanned in the background since - /// this could cause the buffer backing the stream to grow too large very quickly. This interface provides a mechanism - /// for throttling the throughput of the SCAN call. If this struct is dropped without calling this function the stream will - /// close without an error. - /// - /// If this function returns an error the scan call cannot continue as the client has been closed, or some other fatal error - /// has occurred. If this happens the error will appear in the stream from the original SCAN call. - pub fn next(self) -> Result<(), RedisError> { - if !self.can_continue { - return Ok(()); - } - - let kind = RedisCommandKind::Sscan(self.scan_state); - let cmd = RedisCommand::new(kind, self.args, None); - utils::send_command(&self.inner, cmd) - } - - /// A lightweight function to create a Redis client from the SSCAN result. - /// - /// To continue scanning the caller should call `next` on this struct. Calling `sscan` again on the client will initiate a new SSCAN call starting with a cursor of 0. - pub fn create_client(&self) -> RedisClient { - RedisClient { - inner: self.inner.clone(), - } - } -} - -/// The result of a SCAN operation. -pub struct ZScanResult { - pub(crate) results: Option>, - pub(crate) inner: Arc, - pub(crate) args: Vec, - pub(crate) scan_state: ValueScanInner, - pub(crate) can_continue: bool, -} - -impl ZScanResult { - /// Read the current cursor from the ZSCAN operation. - pub fn cursor(&self) -> &str { - &self.scan_state.cursor - } - - /// Whether or not the scan call will continue returning results. If `false` this will be the last result set returned on the stream. 
- /// - /// Calling `next` when this returns `false` will return `Ok(())`, so this does not need to be checked on each result. - pub fn has_more(&self) -> bool { - self.can_continue - } - - /// A reference to the results of the ZSCAN operation. - pub fn results(&self) -> &Option> { - &self.results - } - - /// Take ownership over the results of the ZSCAN operation. Calls to `results` or `take_results` will return `None` afterwards. - pub fn take_results(&mut self) -> Option> { - self.results.take() - } - - /// Move on to the next page of results from the ZSCAN operation. If no more results are available this may close the stream. - /// - /// **This must be called to continue scanning the keyspace.** Results are not automatically scanned in the background since - /// this could cause the buffer backing the stream to grow too large very quickly. This interface provides a mechanism - /// for throttling the throughput of the SCAN call. If this struct is dropped without calling this function the stream will - /// close without an error. - /// - /// If this function returns an error the scan call cannot continue as the client has been closed, or some other fatal error - /// has occurred. If this happens the error will appear in the stream from the original SCAN call. - pub fn next(self) -> Result<(), RedisError> { - if !self.can_continue { - return Ok(()); - } - - let kind = RedisCommandKind::Zscan(self.scan_state); - let cmd = RedisCommand::new(kind, self.args, None); - utils::send_command(&self.inner, cmd) - } - - /// A lightweight function to create a Redis client from the ZSCAN result. - /// - /// To continue scanning the caller should call `next` on this struct. Calling `zscan` again on the client will initiate a new ZSCAN call starting with a cursor of 0. - pub fn create_client(&self) -> RedisClient { - RedisClient { - inner: self.inner.clone(), - } - } -} - -/// Options for the [info](https://redis.io/commands/info) command. 
-#[derive(Clone, Debug, Eq, PartialEq)] -pub enum InfoKind { - Default, - All, - Keyspace, - Cluster, - CommandStats, - Cpu, - Replication, - Stats, - Persistence, - Memory, - Clients, - Server, -} - -impl InfoKind { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - InfoKind::Default => "default", - InfoKind::All => "all", - InfoKind::Keyspace => "keyspace", - InfoKind::Cluster => "cluster", - InfoKind::CommandStats => "commandstats", - InfoKind::Cpu => "cpu", - InfoKind::Replication => "replication", - InfoKind::Stats => "stats", - InfoKind::Persistence => "persistence", - InfoKind::Memory => "memory", - InfoKind::Clients => "clients", - InfoKind::Server => "server", - } - } -} - -/// Configuration for custom redis commands, primarily used for interacting with third party modules or extensions. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct CustomCommand { - /// The command name, sent directly to the server. - pub cmd: &'static str, - /// The hash slot to use for the provided command when running against a cluster. If a hash slot is not provided the command will run against a random node in the cluster. - pub hash_slot: Option, - /// Whether or not the command should block the connection while waiting on a response. - pub is_blocking: bool, -} - -/// The type of reconnection policy to use. This will apply to every connection used by the client. -/// -/// Use a `max_attempts` value of `0` to retry forever. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ReconnectPolicy { - /// Wait a constant amount of time between reconnect attempts, in ms. - Constant { - attempts: u32, - max_attempts: u32, - delay: u32, - }, - /// Backoff reconnection attempts linearly, adding `delay` each time. - Linear { - attempts: u32, - max_attempts: u32, - max_delay: u32, - delay: u32, - }, - /// Backoff reconnection attempts exponentially, multiplying the last delay by `mult` each time. 
- Exponential { - attempts: u32, - max_attempts: u32, - min_delay: u32, - max_delay: u32, - mult: u32, - }, -} - -impl Default for ReconnectPolicy { - fn default() -> Self { - ReconnectPolicy::Constant { - attempts: 0, - max_attempts: 0, - delay: 1000, - } - } -} - -impl ReconnectPolicy { - /// Create a new reconnect policy with a constant backoff. - pub fn new_constant(max_attempts: u32, delay: u32) -> ReconnectPolicy { - ReconnectPolicy::Constant { - max_attempts, - delay, - attempts: 0, - } - } - - /// Create a new reconnect policy with a linear backoff. - pub fn new_linear(max_attempts: u32, max_delay: u32, delay: u32) -> ReconnectPolicy { - ReconnectPolicy::Linear { - max_attempts, - max_delay, - delay, - attempts: 0, - } - } - - /// Create a new reconnect policy with an exponential backoff. - pub fn new_exponential(max_attempts: u32, min_delay: u32, max_delay: u32, mult: u32) -> ReconnectPolicy { - ReconnectPolicy::Exponential { - max_delay, - max_attempts, - min_delay, - mult, - attempts: 0, - } - } - - /// Reset the number of reconnection attempts. It's unlikely users will need to call this. - pub fn reset_attempts(&mut self) { - match *self { - ReconnectPolicy::Constant { ref mut attempts, .. } => { - *attempts = 0; - } - ReconnectPolicy::Linear { ref mut attempts, .. } => { - *attempts = 0; - } - ReconnectPolicy::Exponential { ref mut attempts, .. } => { - *attempts = 0; - } - } - } - - /// Read the number of reconnection attempts. - pub fn attempts(&self) -> u32 { - match *self { - ReconnectPolicy::Constant { ref attempts, .. } => *attempts, - ReconnectPolicy::Linear { ref attempts, .. } => *attempts, - ReconnectPolicy::Exponential { ref attempts, .. } => *attempts, - } - } - - /// Calculate the next delay, incrementing `attempts` in the process. 
- pub fn next_delay(&mut self) -> Option { - match *self { - ReconnectPolicy::Constant { - ref mut attempts, - delay, - max_attempts, - } => { - *attempts = match utils::incr_with_max(*attempts, max_attempts) { - Some(a) => a, - None => return None, - }; - - Some(delay as u64) - } - ReconnectPolicy::Linear { - ref mut attempts, - max_delay, - max_attempts, - delay, - } => { - *attempts = match utils::incr_with_max(*attempts, max_attempts) { - Some(a) => a, - None => return None, - }; - - Some(cmp::min( - max_delay as u64, - (delay as u64).saturating_mul(*attempts as u64), - )) - } - ReconnectPolicy::Exponential { - ref mut attempts, - min_delay, - max_delay, - max_attempts, - mult, - } => { - *attempts = match utils::incr_with_max(*attempts, max_attempts) { - Some(a) => a, - None => return None, - }; - - Some(cmp::min( - max_delay as u64, - (mult as u64).pow(*attempts - 1).saturating_mul(min_delay as u64), - )) - } - } - } -} - -/// Describes how the client should respond when a command is sent while the client is in a blocked state from a blocking command. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum Blocking { - /// Wait to send the command until the blocked command finishes. (Default) - Block, - /// Return an error to the caller. - Error, - /// Interrupt the blocked command by automatically sending `CLIENT UNBLOCK` for the blocked connection. - Interrupt, -} - -impl Default for Blocking { - fn default() -> Self { - Blocking::Block - } -} - -/// Configuration options for a `RedisClient`. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct RedisConfig { - /// Whether or not the client should return an error if it cannot connect to the server the first time when being initialized. - /// If `false` the client will run the reconnect logic if it cannot connect to the server the first time, but if `true` the client - /// will return initial connection errors to the caller immediately. 
- /// - /// Normally the reconnection logic only applies to connections that close unexpectedly, but this flag can apply the same logic to - /// the first connection as it is being created. - /// - /// Note: Callers should use caution setting this to `false` since it can make debugging configuration issues more difficult. - /// - /// Default: `true` - pub fail_fast: bool, - /// Whether or not the client should automatically pipeline commands when possible. - /// - /// Default: `true` - pub pipeline: bool, - /// The default behavior of the client when a command is sent while the connection is blocked on a blocking command. - /// - /// Default: `Blocking::Block` - pub blocking: Blocking, - /// An optional ACL username for the client to use when authenticating. If ACL rules are not configured this should be `None`. - /// - /// Default: `None` - pub username: Option, - /// An optional password for the client to use when authenticating. - /// - /// Default: `None` - pub password: Option, - /// Connection configuration for the server(s). - /// - /// Default: `Centralized(localhost, 6379)` - pub server: ServerConfig, - /// TLS configuration fields. If `None` the connection will not use TLS. - /// - /// Default: `None` - #[cfg(feature = "enable-tls")] - #[cfg_attr(docsrs, doc(cfg(feature = "enable-tls")))] - pub tls: Option, - /// Whether or not to enable tracing for this client. 
- /// - /// Default: `false` - #[cfg(feature = "partial-tracing")] - #[cfg_attr(docsrs, doc(cfg(feature = "partial-tracing")))] - pub tracing: bool, -} - -impl Default for RedisConfig { - fn default() -> Self { - RedisConfig { - fail_fast: true, - pipeline: true, - blocking: Blocking::default(), - username: None, - password: None, - server: ServerConfig::default(), - #[cfg(feature = "enable-tls")] - #[cfg_attr(docsrs, doc(cfg(feature = "enable-tls")))] - tls: None, - #[cfg(feature = "partial-tracing")] - #[cfg_attr(docsrs, doc(cfg(feature = "partial-tracing")))] - tracing: false, - } - } -} - -impl RedisConfig { - /// Whether or not the client uses TLS. - #[cfg(feature = "enable-tls")] - pub fn uses_tls(&self) -> bool { - self.tls.is_some() - } - - /// Whether or not the client uses TLS. - #[cfg(not(feature = "enable-tls"))] - pub fn uses_tls(&self) -> bool { - false - } -} - -/// Connection configuration for the Redis server. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ServerConfig { - Centralized { - /// The hostname or IP address of the Redis server. - host: String, - /// The port on which the Redis server is listening. - port: u16, - }, - Clustered { - /// An array of `(host, port)` tuples for nodes in the cluster. Only one node in the cluster needs to be provided here, - /// the rest will be discovered via the `CLUSTER NODES` command. - hosts: Vec<(String, u16)>, - }, - Sentinel { - /// An array of `(host, port)` tuples for each known sentinel instance. - hosts: Vec<(String, u16)>, - /// The service name for primary/main instances. - service_name: String, - - /// An optional ACL username for the client to use when authenticating. - #[cfg(feature = "sentinel-auth")] - #[cfg_attr(docsrs, doc(cfg(feature = "sentinel-auth")))] - username: Option, - /// An optional password for the client to use when authenticating. 
- #[cfg(feature = "sentinel-auth")] - #[cfg_attr(docsrs, doc(cfg(feature = "sentinel-auth")))] - password: Option, - }, -} - -impl Default for ServerConfig { - fn default() -> Self { - ServerConfig::default_centralized() - } -} - -impl ServerConfig { - /// Create a new centralized config with the provided host and port. - pub fn new_centralized(host: S, port: u16) -> ServerConfig - where - S: Into, - { - ServerConfig::Centralized { - host: host.into(), - port, - } - } - - /// Create a new clustered config with the provided set of hosts and ports. - /// - /// Only one valid host in the cluster needs to be provided here. The client will use `CLUSTER NODES` to discover the other nodes. - pub fn new_clustered(mut hosts: Vec<(S, u16)>) -> ServerConfig - where - S: Into, - { - ServerConfig::Clustered { - hosts: hosts.drain(..).map(|(s, p)| (s.into(), p)).collect(), - } - } - - /// Create a new sentinel config with the provided set of hosts and the name of the service. - /// - /// This library will connect using the details from the [Redis documentation](https://redis.io/topics/sentinel-clients). - pub fn new_sentinel(mut hosts: Vec<(H, u16)>, service_name: N) -> ServerConfig - where - H: Into, - N: Into, - { - ServerConfig::Sentinel { - hosts: hosts.drain(..).map(|(h, p)| (h.into(), p)).collect(), - service_name: service_name.into(), - #[cfg(feature = "sentinel-auth")] - username: None, - #[cfg(feature = "sentinel-auth")] - password: None, - } - } - - /// Create a centralized config with default settings for a local deployment. - pub fn default_centralized() -> ServerConfig { - ServerConfig::Centralized { - host: "127.0.0.1".to_owned(), - port: 6379, - } - } - - /// Create a clustered config with the same defaults as specified in the `create-cluster` script provided by Redis. 
- pub fn default_clustered() -> ServerConfig { - ServerConfig::Clustered { - hosts: vec![ - ("127.0.0.1".to_owned(), 30001), - ("127.0.0.1".to_owned(), 30002), - ("127.0.0.1".to_owned(), 30003), - ], - } - } - - /// Check if the config is for a clustered Redis deployment. - pub fn is_clustered(&self) -> bool { - match self { - ServerConfig::Clustered { .. } => true, - _ => false, - } - } - - /// Check if the config is for a sentinel deployment. - pub fn is_sentinel(&self) -> bool { - match self { - ServerConfig::Sentinel { .. } => true, - _ => false, - } - } - - /// Read the server hosts or sentinel hosts if using the sentinel interface. - pub fn hosts(&self) -> Vec<(&str, u16)> { - match *self { - ServerConfig::Centralized { ref host, port } => vec![(host.as_str(), port)], - ServerConfig::Clustered { ref hosts } => hosts.iter().map(|(h, p)| (h.as_str(), *p)).collect(), - ServerConfig::Sentinel { ref hosts, .. } => hosts.iter().map(|(h, p)| (h.as_str(), *p)).collect(), - } - } -} - -/// Options for the [set](https://redis.io/commands/set) command. -/// -/// -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum SetOptions { - NX, - XX, -} - -impl SetOptions { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - SetOptions::NX => "NX", - SetOptions::XX => "XX", - } - } -} - -/// Expiration options for the [set](https://redis.io/commands/set) command. -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum Expiration { - /// Expiration in seconds. - EX(i64), - /// Expiration in milliseconds. - PX(i64), - /// Expiration time, in seconds. - EXAT(i64), - /// Expiration time, in milliseconds. - PXAT(i64), - /// Do not reset the TTL. 
- KEEPTTL, -} - -impl Expiration { - pub(crate) fn into_args(self) -> (&'static str, Option) { - match self { - Expiration::EX(i) => ("EX", Some(i)), - Expiration::PX(i) => ("PX", Some(i)), - Expiration::EXAT(i) => ("EXAT", Some(i)), - Expiration::PXAT(i) => ("PXAT", Some(i)), - Expiration::KEEPTTL => ("KEEPTTL", None), - } - } -} - -/// The state of the underlying connection to the Redis server. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClientState { - Disconnected, - Disconnecting, - Connected, - Connecting, -} - -impl ClientState { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - ClientState::Connecting => "Connecting", - ClientState::Connected => "Connected", - ClientState::Disconnecting => "Disconnecting", - ClientState::Disconnected => "Disconnected", - } - } -} - -impl fmt::Display for ClientState { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.to_str()) - } -} - -/// A key in Redis. -#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct RedisKey { - key: Vec, -} - -impl RedisKey { - /// Create a new redis key from anything that can be read as bytes. - pub fn new(key: S) -> RedisKey - where - S: Into>, - { - RedisKey { key: key.into() } - } - - /// Read the key as a str slice if it can be parsed as a UTF8 string. - pub fn as_str(&self) -> Option<&str> { - str::from_utf8(&self.key).ok() - } - - /// Read the key as a byte slice. - pub fn as_bytes(&self) -> &[u8] { - &self.key - } - - /// Read the key as a lossy UTF8 string with `String::from_utf8_lossy`. - pub fn as_str_lossy(&self) -> Cow { - String::from_utf8_lossy(&self.key) - } - - /// Convert the key to a UTF8 string, if possible. - pub fn into_string(self) -> Option { - String::from_utf8(self.key).ok() - } - - /// Read the inner bytes making up the key. - pub fn into_bytes(self) -> Vec { - self.key - } - - /// Hash the key to find the associated cluster [hash slot](https://redis.io/topics/cluster-spec#keys-distribution-model). 
- pub fn cluster_hash(&self) -> u16 { - let as_str = String::from_utf8_lossy(&self.key); - redis_protocol::redis_keyslot(&as_str) - } - - /// Read the `host:port` of the cluster node that owns the key if the client is clustered and the cluster state is known. - pub fn cluster_owner(&self, client: &RedisClient) -> Option> { - if utils::is_clustered(&client.inner.config) { - let hash_slot = self.cluster_hash(); - client - .inner - .cluster_state - .read() - .as_ref() - .and_then(|state| state.get_server(hash_slot).map(|slot| slot.server.clone())) - } else { - None - } - } - - /// Replace this key with an empty string, returning the bytes from the original key. - pub fn take(&mut self) -> Vec { - mem::replace(&mut self.key, Vec::new()) - } -} - -impl From for RedisKey { - fn from(s: String) -> RedisKey { - RedisKey { key: s.into_bytes() } - } -} - -impl<'a> From<&'a str> for RedisKey { - fn from(s: &'a str) -> RedisKey { - RedisKey { - key: s.as_bytes().to_vec(), - } - } -} - -impl<'a> From<&'a String> for RedisKey { - fn from(s: &'a String) -> RedisKey { - RedisKey { - key: s.as_bytes().to_vec(), - } - } -} - -impl<'a> From<&'a RedisKey> for RedisKey { - fn from(k: &'a RedisKey) -> RedisKey { - k.clone() - } -} - -impl<'a> From<&'a [u8]> for RedisKey { - fn from(k: &'a [u8]) -> Self { - RedisKey { key: k.to_vec() } - } -} - -/* -// conflicting impl with MultipleKeys when this is used -// callers should use `RedisKey::new` here -impl From> for RedisKey { - fn from(key: Vec) -> Self { - RedisKey { key } - } -} -*/ - -/// Convenience struct for commands that take 1 or more keys. 
-pub struct MultipleKeys { - keys: Vec, -} - -impl MultipleKeys { - pub fn new() -> MultipleKeys { - MultipleKeys { keys: Vec::new() } - } - - pub fn inner(self) -> Vec { - self.keys - } - - pub fn len(&self) -> usize { - self.keys.len() - } -} - -impl From for MultipleKeys -where - T: Into, -{ - fn from(d: T) -> Self { - MultipleKeys { keys: vec![d.into()] } - } -} - -impl FromIterator for MultipleKeys -where - T: Into, -{ - fn from_iter>(iter: I) -> Self { - MultipleKeys { - keys: iter.into_iter().map(|k| k.into()).collect(), - } - } -} - -impl From> for MultipleKeys -where - T: Into, -{ - fn from(d: Vec) -> Self { - MultipleKeys { - keys: d.into_iter().map(|k| k.into()).collect(), - } - } -} - -impl From> for MultipleKeys -where - T: Into, -{ - fn from(d: VecDeque) -> Self { - MultipleKeys { - keys: d.into_iter().map(|k| k.into()).collect(), - } - } -} - -/// Convenience struct for commands that take 1 or more strings. -pub type MultipleStrings = MultipleKeys; - -/// Convenience struct for commands that take 1 or more values. -pub struct MultipleValues { - values: Vec, -} - -impl MultipleValues { - pub fn inner(self) -> Vec { - self.values - } - - pub fn len(&self) -> usize { - self.values.len() - } - - /// Convert this a nested `RedisValue`. - pub fn into_values(self) -> RedisValue { - RedisValue::Array(self.values) - } -} - -impl From<()> for MultipleValues { - fn from(_: ()) -> Self { - MultipleValues { values: vec![] } - } -} - -/* -// https://github.com/rust-lang/rust/issues/50133 -impl TryFrom for MultipleValues -where - T: TryInto, - T::Error: Into, -{ - type Error = RedisError; - - fn try_from(d: T) -> Result { - Ok(MultipleValues { values: vec![to!(d)?] 
}) - } -} -*/ - -impl From for MultipleValues -where - T: Into, -{ - fn from(d: T) -> Self { - MultipleValues { values: vec![d.into()] } - } -} - -impl FromIterator for MultipleValues -where - T: Into, -{ - fn from_iter>(iter: I) -> Self { - MultipleValues { - values: iter.into_iter().map(|v| v.into()).collect(), - } - } -} - -impl TryFrom> for MultipleValues -where - T: TryInto, - T::Error: Into, -{ - type Error = RedisError; - - fn try_from(d: Vec) -> Result { - let mut values = Vec::with_capacity(d.len()); - for value in d.into_iter() { - values.push(to!(value)?); - } - - Ok(MultipleValues { values }) - } -} - -impl TryFrom> for MultipleValues -where - T: TryInto, - T::Error: Into, -{ - type Error = RedisError; - - fn try_from(d: VecDeque) -> Result { - let mut values = Vec::with_capacity(d.len()); - for value in d.into_iter() { - values.push(to!(value)?); - } - - Ok(MultipleValues { values }) - } -} - -/// Convenience struct for `ZINTERSTORE` and `ZUNIONSTORE` when accepting 1 or more `weights` arguments. 
-pub struct MultipleWeights { - values: Vec, -} - -impl MultipleWeights { - pub fn new() -> MultipleWeights { - MultipleWeights { values: Vec::new() } - } - - pub fn inner(self) -> Vec { - self.values - } - - pub fn len(&self) -> usize { - self.values.len() - } -} - -impl From> for MultipleWeights { - fn from(d: Option) -> Self { - match d { - Some(w) => w.into(), - None => MultipleWeights::new(), - } - } -} - -impl From for MultipleWeights { - fn from(d: f64) -> Self { - MultipleWeights { values: vec![d] } - } -} - -impl FromIterator for MultipleWeights { - fn from_iter>(iter: I) -> Self { - MultipleWeights { - values: iter.into_iter().collect(), - } - } -} - -impl From> for MultipleWeights { - fn from(d: Vec) -> Self { - MultipleWeights { values: d } - } -} - -impl From> for MultipleWeights { - fn from(d: VecDeque) -> Self { - MultipleWeights { - values: d.into_iter().collect(), - } - } -} - -/// Convenience struct for the `ZADD` command to accept 1 or more `(score, value)` arguments. 
-pub struct MultipleZaddValues { - values: Vec<(f64, RedisValue)>, -} - -impl MultipleZaddValues { - pub fn new() -> MultipleZaddValues { - MultipleZaddValues { values: Vec::new() } - } - - pub fn inner(self) -> Vec<(f64, RedisValue)> { - self.values - } - - pub fn len(&self) -> usize { - self.values.len() - } -} - -impl TryFrom<(f64, T)> for MultipleZaddValues -where - T: TryInto, - T::Error: Into, -{ - type Error = RedisError; - - fn try_from((f, d): (f64, T)) -> Result { - Ok(MultipleZaddValues { - values: vec![(f, to!(d)?)], - }) - } -} - -impl FromIterator<(f64, T)> for MultipleZaddValues -where - T: Into, -{ - fn from_iter>(iter: I) -> Self { - MultipleZaddValues { - values: iter.into_iter().map(|(f, d)| (f, d.into())).collect(), - } - } -} - -impl TryFrom> for MultipleZaddValues -where - T: TryInto, - T::Error: Into, -{ - type Error = RedisError; - - fn try_from(d: Vec<(f64, T)>) -> Result { - let mut values = Vec::with_capacity(d.len()); - for (f, v) in d.into_iter() { - values.push((f, to!(v)?)); - } - - Ok(MultipleZaddValues { values }) - } -} - -impl TryFrom> for MultipleZaddValues -where - T: TryInto, - T::Error: Into, -{ - type Error = RedisError; - - fn try_from(d: VecDeque<(f64, T)>) -> Result { - let mut values = Vec::with_capacity(d.len()); - for (f, v) in d.into_iter() { - values.push((f, to!(v)?)); - } - - Ok(MultipleZaddValues { values }) - } -} - -/// A map of `(String, RedisValue)` pairs. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct RedisMap { - #[cfg(feature = "index-map")] - pub(crate) inner: IndexMap, - #[cfg(not(feature = "index-map"))] - pub(crate) inner: HashMap, -} - -impl RedisMap { - /// Create a new empty map. - pub fn new() -> Self { - RedisMap { - inner: utils::new_map(0), - } - } - - /// Replace the value an empty map, returning the original value. - pub fn take(&mut self) -> Self { - mem::replace(&mut self.inner, utils::new_map(0)).into() - } - - /// Take the inner `IndexMap`. 
- #[cfg(feature = "index-map")] - #[cfg_attr(docsrs, doc(cfg(feature = "index-map")))] - pub fn inner(self) -> IndexMap { - self.inner - } - - /// Read the number of (key, value) pairs in the map. - pub fn len(&self) -> usize { - self.inner.len() - } - - /// Take the inner `HashMap`. - #[cfg(not(feature = "index-map"))] - #[cfg_attr(docsrs, doc(cfg(not(feature = "index-map"))))] - pub fn inner(self) -> HashMap { - self.inner - } -} - -impl Deref for RedisMap { - #[cfg(feature = "index-map")] - #[cfg_attr(docsrs, doc(cfg(feature = "index-map")))] - type Target = IndexMap; - #[cfg(not(feature = "index-map"))] - #[cfg_attr(docsrs, doc(cfg(not(feature = "index-map"))))] - type Target = HashMap; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl DerefMut for RedisMap { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} - -#[cfg(feature = "index-map")] -#[cfg_attr(docsrs, doc(cfg(feature = "index-map")))] -impl From> for RedisMap { - fn from(d: IndexMap) -> Self { - RedisMap { inner: d } - } -} - -#[cfg(feature = "index-map")] -impl From> for RedisMap { - fn from(d: HashMap) -> Self { - let mut inner = utils::new_map(d.len()); - for (key, value) in d.into_iter() { - inner.insert(key, value); - } - RedisMap { inner } - } -} - -#[cfg(not(feature = "index-map"))] -impl From> for RedisMap { - fn from(d: HashMap) -> Self { - RedisMap { inner: d } - } -} - -impl From> for RedisMap { - fn from(d: BTreeMap) -> Self { - let mut inner = utils::new_map(d.len()); - for (key, value) in d.into_iter() { - inner.insert(key, value); - } - RedisMap { inner } - } -} - -impl> From<(S, RedisValue)> for RedisMap { - fn from(d: (S, RedisValue)) -> Self { - let mut inner = utils::new_map(1); - inner.insert(d.0.into(), d.1); - RedisMap { inner } - } -} - -impl> From> for RedisMap { - fn from(d: Vec<(S, RedisValue)>) -> Self { - let mut inner = utils::new_map(d.len()); - for (key, value) in d.into_iter() { - inner.insert(key.into(), value); - } - RedisMap { 
inner } - } -} - -impl> From> for RedisMap { - fn from(d: VecDeque<(S, RedisValue)>) -> Self { - let mut inner = utils::new_map(d.len()); - for (key, value) in d.into_iter() { - inner.insert(key.into(), value); - } - RedisMap { inner } - } -} - -/// The kind of value from Redis. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum RedisValueKind { - Integer, - String, - Bytes, - Null, - Queued, - Map, - Array, -} - -impl fmt::Display for RedisValueKind { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let s = match *self { - RedisValueKind::Integer => "Integer", - RedisValueKind::String => "String", - RedisValueKind::Bytes => "Bytes", - RedisValueKind::Null => "nil", - RedisValueKind::Queued => "Queued", - RedisValueKind::Map => "Map", - RedisValueKind::Array => "Array", - }; - - write!(f, "{}", s) - } -} - -/// A value used in a Redis command. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum RedisValue { - /// An integer value. - Integer(i64), - /// A string value. - String(String), - /// A binary value to represent non-UTF8 strings. - Bytes(Vec), - /// A `nil` value. - Null, - /// A special value used to indicate a MULTI block command was received by the server. - Queued, - /// A nested map of key/value pairs. - Map(RedisMap), - /// An ordered list of values. - Array(Vec), -} - -impl<'a> RedisValue { - /// Create a new `RedisValue` with the `OK` status. - pub fn new_ok() -> Self { - RedisValue::String(OK.into()) - } - - /// Whether or not the value is a simple string OK value. - pub fn is_ok(&self) -> bool { - match *self { - RedisValue::String(ref s) => s == OK, - _ => false, - } - } - - /// Attempt to convert the value into an integer, returning the original string as an error if the parsing fails. 
- pub fn into_integer(self) -> Result { - match self { - RedisValue::String(s) => match s.parse::() { - Ok(i) => Ok(RedisValue::Integer(i)), - Err(_) => Err(RedisValue::String(s)), - }, - RedisValue::Integer(i) => Ok(RedisValue::Integer(i)), - _ => Err(self), - } - } - - /// Read the type of the value without any associated data. - pub fn kind(&self) -> RedisValueKind { - match *self { - RedisValue::Integer(_) => RedisValueKind::Integer, - RedisValue::String(_) => RedisValueKind::String, - RedisValue::Bytes(_) => RedisValueKind::Bytes, - RedisValue::Null => RedisValueKind::Null, - RedisValue::Queued => RedisValueKind::Queued, - RedisValue::Map(_) => RedisValueKind::Map, - RedisValue::Array(_) => RedisValueKind::Array, - } - } - - /// Check if the value is null. - pub fn is_null(&self) -> bool { - match *self { - RedisValue::Null => true, - _ => false, - } - } - - /// Check if the value is an integer. - pub fn is_integer(&self) -> bool { - match *self { - RedisValue::Integer(_) => true, - _ => false, - } - } - - /// Check if the value is a string. - pub fn is_string(&self) -> bool { - match *self { - RedisValue::String(_) => true, - _ => false, - } - } - - /// Check if the value is an array of bytes. - pub fn is_bytes(&self) -> bool { - match *self { - RedisValue::Bytes(_) => true, - _ => false, - } - } - - /// Check if the value is a `QUEUED` response. - pub fn is_queued(&self) -> bool { - match *self { - RedisValue::Queued => true, - _ => false, - } - } - - /// Check if the inner string value can be cast to an `f64`. - pub fn is_float(&self) -> bool { - match *self { - RedisValue::String(ref s) => utils::redis_string_to_f64(s).is_ok(), - _ => false, - } - } - - /// Whether or not the value is a `RedisMap`. - pub fn is_map(&self) -> bool { - match *self { - RedisValue::Map(_) => true, - _ => false, - } - } - - /// Whether or not the value is an array. 
- pub fn is_array(&self) -> bool { - match *self { - RedisValue::Array(_) => true, - _ => false, - } - } - - /// Read and return the inner value as a `u64`, if possible. - pub fn as_u64(&self) -> Option { - match self { - RedisValue::Integer(ref i) => { - if *i >= 0 { - Some(*i as u64) - } else { - None - } - } - RedisValue::String(ref s) => s.parse::().ok(), - RedisValue::Array(ref inner) => { - if inner.len() == 1 { - inner.first().and_then(|v| v.as_u64()) - } else { - None - } - } - _ => None, - } - } - - /// Read and return the inner value as a `i64`, if possible. - pub fn as_i64(&self) -> Option { - match self { - RedisValue::Integer(ref i) => Some(*i), - RedisValue::String(ref s) => s.parse::().ok(), - RedisValue::Array(ref inner) => { - if inner.len() == 1 { - inner.first().and_then(|v| v.as_i64()) - } else { - None - } - } - _ => None, - } - } - - /// Read and return the inner value as a `usize`, if possible. - pub fn as_usize(&self) -> Option { - match self { - RedisValue::Integer(i) => { - if *i >= 0 { - Some(*i as usize) - } else { - None - } - } - RedisValue::String(ref s) => s.parse::().ok(), - RedisValue::Array(ref inner) => { - if inner.len() == 1 { - inner.first().and_then(|v| v.as_usize()) - } else { - None - } - } - _ => None, - } - } - - /// Read and return the inner value as a `f64`, if possible. - pub fn as_f64(&self) -> Option { - match self { - RedisValue::String(ref s) => utils::redis_string_to_f64(s).ok(), - RedisValue::Integer(ref i) => Some(*i as f64), - RedisValue::Array(ref inner) => { - if inner.len() == 1 { - inner.first().and_then(|v| v.as_f64()) - } else { - None - } - } - _ => None, - } - } - - /// Read and return the inner `String` if the value is a string or integer. 
- pub fn into_string(self) -> Option { - match self { - RedisValue::String(s) => Some(s), - RedisValue::Bytes(b) => String::from_utf8(b).ok(), - RedisValue::Integer(i) => Some(i.to_string()), - RedisValue::Queued => Some(QUEUED.to_owned()), - RedisValue::Array(mut inner) => { - if inner.len() == 1 { - inner.pop().and_then(|v| v.into_string()) - } else { - None - } - } - _ => None, - } - } - - /// Read and return the inner `String` if the value is a string or integer. - /// - /// Note: this will cast integers to strings. - pub fn as_string(&self) -> Option { - match self { - RedisValue::String(ref s) => Some(s.to_owned()), - RedisValue::Bytes(ref b) => str::from_utf8(b).ok().map(|s| s.to_owned()), - RedisValue::Integer(ref i) => Some(i.to_string()), - RedisValue::Queued => Some(QUEUED.to_owned()), - _ => None, - } - } - - /// Read the inner value as a string slice. - /// - /// Null is returned as "nil" and integers are cast to a string. - pub fn as_str(&'a self) -> Option> { - let s = match *self { - RedisValue::String(ref s) => Cow::Borrowed(s.as_str()), - RedisValue::Integer(ref i) => Cow::Owned(i.to_string()), - RedisValue::Null => Cow::Borrowed(NIL), - RedisValue::Queued => Cow::Borrowed(QUEUED), - RedisValue::Bytes(ref b) => return str::from_utf8(b).ok().map(|s| Cow::Borrowed(s)), - _ => return None, - }; - - Some(s) - } - - /// Read the inner value as a string, using `String::from_utf8_lossy` on byte slices. - pub fn as_str_lossy(&self) -> Option> { - let s = match *self { - RedisValue::String(ref s) => Cow::Borrowed(s.as_str()), - RedisValue::Integer(ref i) => Cow::Owned(i.to_string()), - RedisValue::Null => Cow::Borrowed(NIL), - RedisValue::Queued => Cow::Borrowed(QUEUED), - RedisValue::Bytes(ref b) => String::from_utf8_lossy(b), - _ => return None, - }; - - Some(s) - } - - /// Read the inner value as an array of bytes, if possible. 
- pub fn as_bytes(&self) -> Option<&[u8]> { - match *self { - RedisValue::String(ref s) => Some(s.as_bytes()), - RedisValue::Bytes(ref b) => Some(b), - RedisValue::Queued => Some(QUEUED.as_bytes()), - _ => None, - } - } - - /// Attempt to convert the value to a `bool`. - pub fn as_bool(&self) -> Option { - match *self { - RedisValue::Integer(ref i) => match *i { - 0 => Some(false), - 1 => Some(true), - _ => None, - }, - RedisValue::String(ref s) => match s.as_ref() { - "true" | "TRUE" | "t" | "T" | "1" => Some(true), - "false" | "FALSE" | "f" | "F" | "0" => Some(false), - _ => None, - }, - RedisValue::Null => Some(false), - RedisValue::Array(ref inner) => { - if inner.len() == 1 { - inner.first().and_then(|v| v.as_bool()) - } else { - None - } - } - _ => None, - } - } - - /// Convert the value to an array of `(value, score)` tuples if the redis value is an array result from a sorted set command with scores. - pub fn into_zset_result(self) -> Result, RedisError> { - protocol_utils::value_to_zset_result(self) - } - - /// Attempt to convert this value to a Redis map if it's an array with an even number of elements. - pub fn into_map(self) -> Result { - if let RedisValue::Map(map) = self { - return Ok(map); - } - - if let RedisValue::Array(mut values) = self { - if values.len() % 2 != 0 { - return Err(RedisError::new( - RedisErrorKind::Unknown, - "Expected an even number of elements.", - )); - } - let mut inner = utils::new_map(values.len() / 2); - while values.len() >= 2 { - let value = values.pop().unwrap(); - let key = match values.pop().unwrap().into_string() { - Some(s) => s, - None => { - return Err(RedisError::new( - RedisErrorKind::Unknown, - "Expected redis map string key.", - )) - } - }; - - inner.insert(key, value); - } - - Ok(RedisMap { inner }) - } else { - Err(RedisError::new(RedisErrorKind::Unknown, "Expected array.")) - } - } - - /// Convert the array value to a set, if possible. 
- #[cfg(not(feature = "index-map"))] - pub fn into_set(self) -> Result, RedisError> { - if let RedisValue::Array(values) = self { - let mut out = HashSet::with_capacity(values.len()); - - for value in values.into_iter() { - out.insert(value); - } - Ok(out) - } else { - Err(RedisError::new(RedisErrorKind::Unknown, "Expected array.")) - } - } - - #[cfg(feature = "index-map")] - pub fn into_set(self) -> Result, RedisError> { - if let RedisValue::Array(values) = self { - let mut out = IndexSet::with_capacity(values.len()); - - for value in values.into_iter() { - out.insert(value); - } - Ok(out) - } else { - Err(RedisError::new(RedisErrorKind::Unknown, "Expected array.")) - } - } - - /// Convert this value to an array if it's an array or map. - /// - /// If the value is not an array or map this returns a single-element array containing the current value. - pub fn into_array(self) -> Vec { - match self { - RedisValue::Array(values) => values, - RedisValue::Map(map) => { - let mut out = Vec::with_capacity(map.len() * 2); - - for (key, value) in map.inner().into_iter() { - out.push(key.into()); - out.push(value); - } - out - } - _ => vec![self], - } - } - - /// Convert the value to an array of bytes, if possible. - pub fn into_bytes(self) -> Option> { - let v = match self { - RedisValue::String(s) => s.into_bytes(), - RedisValue::Bytes(b) => b, - RedisValue::Null => NULL.as_bytes().to_vec(), - RedisValue::Queued => QUEUED.as_bytes().to_vec(), - RedisValue::Array(mut inner) => { - if inner.len() == 1 { - return inner.pop().and_then(|v| v.into_bytes()); - } else { - return None; - } - } - // TODO maybe rethink this - RedisValue::Integer(i) => i.to_string().into_bytes(), - _ => return None, - }; - - Some(v) - } - - /// Convert the value into a `GeoPosition`, if possible. - /// - /// Null values are returned as `None` to work more easily with the result of the `GEOPOS` command. 
- pub fn as_geo_position(&self) -> Result, RedisError> { - utils::value_to_geo_pos(self) - } - - /// Replace this value with `RedisValue::Null`, returning the original value. - pub fn take(&mut self) -> RedisValue { - mem::replace(self, RedisValue::Null) - } - - /// Attempt to convert this value to any value that implements the [RedisResponse](crate::types::RedisResponse) trait. - /// - /// ```rust - /// # use fred::types::RedisValue; - /// # use std::collections::HashMap; - /// let foo: usize = RedisValue::String("123".into()).convert()?; - /// let foo: i64 = RedisValue::String("123".into()).convert()?; - /// let foo: String = RedisValue::String("123".into()).convert()?; - /// let foo: Vec = RedisValue::Bytes(vec![102, 111, 111]).convert()?; - /// let foo: Vec = RedisValue::String("foo".into()).convert()?; - /// let foo: Vec = RedisValue::Array(vec!["a".into(), "b".into()]).convert()?; - /// let foo: HashMap = RedisValue::Array(vec![ - /// "a".into(), 1.into(), - /// "b".into(), 2.into() - /// ]) - /// .convert()?; - /// let foo: (String, i64) = RedisValue::Array(vec!["a".into(), 1.into()]).convert()?; - /// let foo: Vec<(String, i64)> = RedisValue::Array(vec![ - /// "a".into(), 1.into(), - /// "b".into(), 2.into() - /// ]) - /// .convert()?; - /// // ... 
- /// ``` - pub fn convert(self) -> Result - where - R: RedisResponse, - { - R::from_value(self) - } -} - -impl Hash for RedisValue { - fn hash(&self, state: &mut H) { - let prefix = match self.kind() { - RedisValueKind::Integer => 'i', - RedisValueKind::String => 's', - RedisValueKind::Null => 'n', - RedisValueKind::Queued => 'h', - RedisValueKind::Array => 'a', - RedisValueKind::Map => 'm', - RedisValueKind::Bytes => 'b', - }; - prefix.hash(state); - - match *self { - RedisValue::Integer(d) => d.hash(state), - RedisValue::String(ref s) => s.hash(state), - RedisValue::Bytes(ref b) => b.hash(state), - RedisValue::Null => NULL.hash(state), - RedisValue::Queued => QUEUED.hash(state), - RedisValue::Map(ref map) => utils::hash_map(map, state), - RedisValue::Array(ref arr) => { - for value in arr.iter() { - value.hash(state); - } - } - } - } -} - -impl From for RedisValue { - fn from(d: u8) -> Self { - RedisValue::Integer(d as i64) - } -} - -impl From for RedisValue { - fn from(d: u16) -> Self { - RedisValue::Integer(d as i64) - } -} - -impl From for RedisValue { - fn from(d: u32) -> Self { - RedisValue::Integer(d as i64) - } -} - -impl From for RedisValue { - fn from(d: i8) -> Self { - RedisValue::Integer(d as i64) - } -} - -impl From for RedisValue { - fn from(d: i16) -> Self { - RedisValue::Integer(d as i64) - } -} - -impl From for RedisValue { - fn from(d: i32) -> Self { - RedisValue::Integer(d as i64) - } -} - -impl From for RedisValue { - fn from(d: i64) -> Self { - RedisValue::Integer(d) - } -} - -impl TryFrom for RedisValue { - type Error = RedisError; - - fn try_from(f: f32) -> Result { - utils::f64_to_redis_string(f as f64) - } -} - -impl TryFrom for RedisValue { - type Error = RedisError; - - fn try_from(f: f64) -> Result { - utils::f64_to_redis_string(f) - } -} - -impl TryFrom for RedisValue { - type Error = RedisError; - - fn try_from(d: u64) -> Result { - if d >= (i64::MAX as u64) { - return Err(RedisError::new(RedisErrorKind::Unknown, "Unsigned integer 
too large.")); - } - - Ok((d as i64).into()) - } -} - -impl TryFrom for RedisValue { - type Error = RedisError; - - fn try_from(d: u128) -> Result { - if d >= (i64::MAX as u128) { - return Err(RedisError::new(RedisErrorKind::Unknown, "Unsigned integer too large.")); - } - - Ok((d as i64).into()) - } -} - -impl TryFrom for RedisValue { - type Error = RedisError; - - fn try_from(d: i128) -> Result { - if d >= (i64::MAX as i128) { - return Err(RedisError::new(RedisErrorKind::Unknown, "Signed integer too large.")); - } - - Ok((d as i64).into()) - } -} - -impl TryFrom for RedisValue { - type Error = RedisError; - - fn try_from(d: usize) -> Result { - if d >= (i64::MAX as usize) { - return Err(RedisError::new(RedisErrorKind::Unknown, "Unsigned integer too large.")); - } - - Ok((d as i64).into()) - } -} - -impl From for RedisValue { - fn from(d: String) -> Self { - RedisValue::String(d) - } -} - -impl<'a> From<&'a str> for RedisValue { - fn from(d: &'a str) -> Self { - RedisValue::String(d.to_owned()) - } -} - -impl<'a> From<&'a String> for RedisValue { - fn from(s: &'a String) -> Self { - RedisValue::String(s.clone()) - } -} - -impl<'a> From<&'a [u8]> for RedisValue { - fn from(b: &'a [u8]) -> Self { - RedisValue::Bytes(b.to_vec()) - } -} - -impl From for RedisValue { - fn from(d: bool) -> Self { - RedisValue::from(match d { - true => "true", - false => "false", - }) - } -} - -impl TryFrom> for RedisValue -where - T: TryInto, - T::Error: Into, -{ - type Error = RedisError; - - fn try_from(d: Option) -> Result { - match d { - Some(i) => to!(i), - None => Ok(RedisValue::Null), - } - } -} - -impl FromIterator for RedisValue { - fn from_iter>(iter: I) -> Self { - RedisValue::Array(iter.into_iter().collect()) - } -} - -#[cfg(feature = "index-map")] -#[cfg_attr(docsrs, doc(cfg(feature = "index-map")))] -impl From> for RedisValue { - fn from(d: IndexMap) -> Self { - RedisValue::Map(d.into()) - } -} - -impl From> for RedisValue { - fn from(d: HashMap) -> Self { - 
RedisValue::Map(d.into()) - } -} - -impl From> for RedisValue { - fn from(d: BTreeMap) -> Self { - RedisValue::Map(d.into()) - } -} - -impl From for RedisValue { - fn from(d: RedisKey) -> Self { - RedisValue::Bytes(d.key) - } -} - -/// The parsed result of the MEMORY STATS command for a specific database. -/// -/// -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct DatabaseMemoryStats { - pub overhead_hashtable_main: u64, - pub overhead_hashtable_expires: u64, -} - -impl Default for DatabaseMemoryStats { - fn default() -> Self { - DatabaseMemoryStats { - overhead_hashtable_expires: 0, - overhead_hashtable_main: 0, - } - } -} - -/// The parsed result of the MEMORY STATS command. -/// -/// -#[derive(Clone, Debug)] -pub struct MemoryStats { - pub peak_allocated: u64, - pub total_allocated: u64, - pub startup_allocated: u64, - pub replication_backlog: u64, - pub clients_slaves: u64, - pub clients_normal: u64, - pub aof_buffer: u64, - pub lua_caches: u64, - pub overhead_total: u64, - pub keys_count: u64, - pub keys_bytes_per_key: u64, - pub dataset_bytes: u64, - pub dataset_percentage: f64, - pub peak_percentage: f64, - pub fragmentation: f64, - pub fragmentation_bytes: u64, - pub rss_overhead_ratio: f64, - pub rss_overhead_bytes: u64, - pub allocator_allocated: u64, - pub allocator_active: u64, - pub allocator_resident: u64, - pub allocator_fragmentation_ratio: f64, - pub allocator_fragmentation_bytes: u64, - pub allocator_rss_ratio: f64, - pub allocator_rss_bytes: u64, - pub db: HashMap, -} - -impl Default for MemoryStats { - fn default() -> Self { - MemoryStats { - peak_allocated: 0, - total_allocated: 0, - startup_allocated: 0, - replication_backlog: 0, - clients_normal: 0, - clients_slaves: 0, - aof_buffer: 0, - lua_caches: 0, - overhead_total: 0, - keys_count: 0, - keys_bytes_per_key: 0, - dataset_bytes: 0, - dataset_percentage: 0.0, - peak_percentage: 0.0, - fragmentation: 0.0, - fragmentation_bytes: 0, - rss_overhead_ratio: 0.0, - rss_overhead_bytes: 0, - 
allocator_allocated: 0, - allocator_active: 0, - allocator_resident: 0, - allocator_fragmentation_ratio: 0.0, - allocator_fragmentation_bytes: 0, - allocator_rss_bytes: 0, - allocator_rss_ratio: 0.0, - db: HashMap::new(), - } - } -} - -impl PartialEq for MemoryStats { - fn eq(&self, other: &Self) -> bool { - self.peak_allocated == other.peak_allocated - && self.total_allocated == other.total_allocated - && self.startup_allocated == other.startup_allocated - && self.replication_backlog == other.replication_backlog - && self.clients_normal == other.clients_normal - && self.clients_slaves == other.clients_slaves - && self.aof_buffer == other.aof_buffer - && self.lua_caches == other.lua_caches - && self.overhead_total == other.overhead_total - && self.keys_count == other.keys_count - && self.keys_bytes_per_key == other.keys_bytes_per_key - && self.dataset_bytes == other.dataset_bytes - && utils::f64_eq(self.dataset_percentage, other.dataset_percentage) - && utils::f64_eq(self.peak_percentage, other.peak_percentage) - && utils::f64_eq(self.fragmentation, other.fragmentation) - && self.fragmentation_bytes == other.fragmentation_bytes - && utils::f64_eq(self.rss_overhead_ratio, other.rss_overhead_ratio) - && self.rss_overhead_bytes == other.rss_overhead_bytes - && self.allocator_allocated == other.allocator_allocated - && self.allocator_active == other.allocator_active - && self.allocator_resident == other.allocator_resident - && utils::f64_eq(self.allocator_fragmentation_ratio, other.allocator_fragmentation_ratio) - && self.allocator_fragmentation_bytes == other.allocator_fragmentation_bytes - && self.allocator_rss_bytes == other.allocator_rss_bytes - && utils::f64_eq(self.allocator_rss_ratio, other.allocator_rss_ratio) - && self.db == other.db - } -} - -impl Eq for MemoryStats {} - -/// ACL rules describing the keys a user can access. 
-#[derive(Clone, Debug, Eq, PartialEq)] -pub enum AclKeyPattern { - AllKeys, - Custom(String), -} - -impl AclKeyPattern { - pub(crate) fn to_string(&self) -> String { - match *self { - AclKeyPattern::AllKeys => "allkeys".into(), - AclKeyPattern::Custom(ref pat) => format!("~{}", pat), - } - } -} - -/// ACL rules describing the channels a user can access. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum AclChannelPattern { - AllChannels, - Custom(String), -} - -impl AclChannelPattern { - pub(crate) fn to_string(&self) -> String { - match *self { - AclChannelPattern::AllChannels => "allchannels".into(), - AclChannelPattern::Custom(ref pat) => format!("&{}", pat), - } - } -} - -/// ACL rules describing the commands a user can access. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum AclCommandPattern { - AllCommands, - NoCommands, - Custom { - command: String, - subcommand: Option, - }, -} - -impl AclCommandPattern { - pub(crate) fn to_string(&self, prefix: &'static str) -> String { - match *self { - AclCommandPattern::AllCommands => "allcommands".into(), - AclCommandPattern::NoCommands => "nocommands".into(), - AclCommandPattern::Custom { - ref command, - ref subcommand, - } => { - if let Some(subcommand) = subcommand { - format!("{}{}|{}", prefix, command, subcommand) - } else { - format!("{}{}", prefix, command) - } - } - } - } -} - -/// ACL rules associated with a user. 
-/// -/// -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum AclRule { - On, - Off, - Reset, - ResetChannels, - ResetKeys, - AddKeys(AclKeyPattern), - AddChannels(AclChannelPattern), - AddCommands(AclCommandPattern), - RemoveCommands(AclCommandPattern), - AddCategory(String), - RemoveCategory(String), - NoPass, - AddPassword(String), - AddHashedPassword(String), - RemovePassword(String), - RemoveHashedPassword(String), -} - -impl AclRule { - pub(crate) fn to_string(&self) -> String { - match self { - AclRule::On => "on".into(), - AclRule::Off => "off".into(), - AclRule::Reset => "reset".into(), - AclRule::ResetChannels => "resetchannels".into(), - AclRule::ResetKeys => "resetkeys".into(), - AclRule::NoPass => "nopass".into(), - AclRule::AddPassword(ref pass) => format!(">{}", pass), - AclRule::RemovePassword(ref pass) => format!("<{}", pass), - AclRule::AddHashedPassword(ref pass) => format!("#{}", pass), - AclRule::RemoveHashedPassword(ref pass) => format!("!{}", pass), - AclRule::AddCategory(ref cat) => format!("+@{}", cat), - AclRule::RemoveCategory(ref cat) => format!("-@{}", cat), - AclRule::AddKeys(ref pat) => pat.to_string(), - AclRule::AddChannels(ref pat) => pat.to_string(), - AclRule::AddCommands(ref pat) => pat.to_string("+"), - AclRule::RemoveCommands(ref pat) => pat.to_string("-"), - } - } -} - -/// A flag from the ACL GETUSER command. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum AclUserFlag { - On, - Off, - AllKeys, - AllChannels, - AllCommands, - NoPass, -} - -/// An ACL user from the ACL GETUSER command. -/// -/// -#[derive(Clone, Debug, Eq, PartialEq, Default)] -pub struct AclUser { - pub flags: Vec, - pub passwords: Vec, - pub commands: Vec, - pub keys: Vec, - pub channels: Vec, -} - -/// The output of an entry in the slow queries log. 
-/// -/// -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct SlowlogEntry { - pub id: i64, - pub timestamp: i64, - pub duration: u64, - pub args: Vec, - pub ip: Option, - pub name: Option, -} - -/// The direction to move elements in a *LMOVE command. -/// -/// -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum LMoveDirection { - Left, - Right, -} - -impl LMoveDirection { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - LMoveDirection::Left => "LEFT", - LMoveDirection::Right => "RIGHT", - } - } -} - -/// The type of clients to close. -/// -/// -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClientKillType { - Normal, - Master, - Replica, - Pubsub, -} - -impl ClientKillType { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - ClientKillType::Normal => "normal", - ClientKillType::Master => "master", - ClientKillType::Replica => "replica", - ClientKillType::Pubsub => "pubsub", - } - } -} - -/// Filters provided to the CLIENT KILL command. -/// -/// -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClientKillFilter { - ID(String), - Type(ClientKillType), - User(String), - Addr(String), - LAddr(String), - SkipMe(bool), -} - -impl ClientKillFilter { - pub(crate) fn to_str(&self) -> (&'static str, &str) { - match *self { - ClientKillFilter::ID(ref id) => ("ID", id), - ClientKillFilter::Type(ref kind) => ("TYPE", kind.to_str()), - ClientKillFilter::User(ref user) => ("USER", user), - ClientKillFilter::Addr(ref addr) => ("ADDR", addr), - ClientKillFilter::LAddr(ref addr) => ("LADDR", addr), - ClientKillFilter::SkipMe(ref b) => ( - "SKIPME", - match *b { - true => "yes", - false => "no", - }, - ), - } - } -} - -/// Filters for the CLIENT PAUSE command. 
-/// -/// -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClientPauseKind { - Write, - All, -} - -impl ClientPauseKind { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - ClientPauseKind::Write => "WRITE", - ClientPauseKind::All => "ALL", - } - } -} - -/// Arguments for the CLIENT REPLY command. -/// -/// -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClientReplyFlag { - On, - Off, - Skip, -} - -impl ClientReplyFlag { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - ClientReplyFlag::On => "ON", - ClientReplyFlag::Off => "OFF", - ClientReplyFlag::Skip => "SKIP", - } - } -} - -/// Arguments to the CLIENT UNBLOCK command. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClientUnblockFlag { - Timeout, - Error, -} - -impl ClientUnblockFlag { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - ClientUnblockFlag::Timeout => "TIMEOUT", - ClientUnblockFlag::Error => "ERROR", - } - } -} - -/// The state of the cluster from the CLUSTER INFO command. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClusterState { - Ok, - Fail, -} - -impl Default for ClusterState { - fn default() -> Self { - ClusterState::Ok - } -} - -/// A parsed response from the CLUSTER INFO command. -/// -/// -#[derive(Clone, Debug, Eq, PartialEq, Default)] -pub struct ClusterInfo { - pub cluster_state: ClusterState, - pub cluster_slots_assigned: u16, - pub cluster_slots_ok: u16, - pub cluster_slots_pfail: u16, - pub cluster_slots_fail: u16, - pub cluster_known_nodes: u16, - pub cluster_size: u32, - pub cluster_current_epoch: u64, - pub cluster_my_epoch: u64, - pub cluster_stats_messages_sent: u64, - pub cluster_stats_messages_received: u64, -} - -/// A convenience struct for functions that take one or more hash slot values. 
-pub struct MultipleHashSlots { - inner: Vec, -} - -impl MultipleHashSlots { - pub fn inner(self) -> Vec { - self.inner - } - - pub fn len(&self) -> usize { - self.inner.len() - } -} - -impl From for MultipleHashSlots { - fn from(d: u16) -> Self { - MultipleHashSlots { inner: vec![d] } - } -} - -impl From> for MultipleHashSlots { - fn from(d: Vec) -> Self { - MultipleHashSlots { inner: d } - } -} - -impl From> for MultipleHashSlots { - fn from(d: VecDeque) -> Self { - MultipleHashSlots { - inner: d.into_iter().collect(), - } - } -} - -impl FromIterator for MultipleHashSlots { - fn from_iter>(iter: I) -> Self { - MultipleHashSlots { - inner: iter.into_iter().collect(), - } - } -} - -/// Options for the CLUSTER FAILOVER command. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClusterFailoverFlag { - Force, - Takeover, -} - -impl ClusterFailoverFlag { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - ClusterFailoverFlag::Force => "FORCE", - ClusterFailoverFlag::Takeover => "TAKEOVER", - } - } -} - -/// Flags for the CLUSTER RESET command. -/// -/// -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClusterResetFlag { - Hard, - Soft, -} - -impl ClusterResetFlag { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - ClusterResetFlag::Hard => "HARD", - ClusterResetFlag::Soft => "SOFT", - } - } -} - -/// Flags for the CLUSTER SETSLOT command. -/// -/// -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClusterSetSlotState { - Importing, - Migrating, - Stable, - Node(String), -} - -impl ClusterSetSlotState { - pub(crate) fn to_str(&self) -> (&'static str, Option<&str>) { - match *self { - ClusterSetSlotState::Importing => ("IMPORTING", None), - ClusterSetSlotState::Migrating => ("MIGRATING", None), - ClusterSetSlotState::Stable => ("STABLE", None), - ClusterSetSlotState::Node(ref n) => ("NODE", Some(n)), - } - } -} - -/// A struct describing the longitude and latitude coordinates of a GEO command. 
-#[derive(Clone, Debug)] -pub struct GeoPosition { - pub longitude: f64, - pub latitude: f64, -} - -impl PartialEq for GeoPosition { - fn eq(&self, other: &Self) -> bool { - utils::f64_eq(self.longitude, other.longitude) && utils::f64_eq(self.latitude, other.latitude) - } -} - -impl Eq for GeoPosition {} - -impl From<(f64, f64)> for GeoPosition { - fn from(d: (f64, f64)) -> Self { - GeoPosition { - longitude: d.0, - latitude: d.1, - } - } -} - -/// Units for the GEO DIST command. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum GeoUnit { - Meters, - Kilometers, - Miles, - Feet, -} - -impl GeoUnit { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - GeoUnit::Meters => "m", - GeoUnit::Kilometers => "km", - GeoUnit::Feet => "ft", - GeoUnit::Miles => "mi", - } - } -} - -/// A struct describing the value inside a GEO data structure. -#[derive(Debug, Clone, Eq, PartialEq)] -pub struct GeoValue { - pub coordinates: GeoPosition, - pub member: RedisValue, -} - -impl GeoValue { - pub fn new>(coordinates: GeoPosition, member: V) -> Self { - let member = member.into(); - GeoValue { coordinates, member } - } -} - -impl TryFrom<(f64, f64, T)> for GeoValue -where - T: TryInto, - T::Error: Into, -{ - type Error = RedisError; - - fn try_from(v: (f64, f64, T)) -> Result { - Ok(GeoValue { - coordinates: GeoPosition { - longitude: v.0, - latitude: v.1, - }, - member: utils::try_into(v.2)?, - }) - } -} - -/// A convenience struct for commands that take one or more GEO values. 
-#[derive(Clone, Debug, Eq, PartialEq)] -pub struct MultipleGeoValues { - inner: Vec, -} - -impl MultipleGeoValues { - pub fn len(&self) -> usize { - self.inner.len() - } - - pub fn inner(self) -> Vec { - self.inner - } -} - -impl From for MultipleGeoValues { - fn from(d: GeoValue) -> Self { - MultipleGeoValues { inner: vec![d] } - } -} - -impl From> for MultipleGeoValues { - fn from(d: Vec) -> Self { - MultipleGeoValues { inner: d } - } -} - -impl From> for MultipleGeoValues { - fn from(d: VecDeque) -> Self { - MultipleGeoValues { - inner: d.into_iter().collect(), - } - } -} - -/// The sort order for redis commands that take or return a sorted list. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum SortOrder { - Asc, - Desc, -} - -impl SortOrder { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - SortOrder::Asc => "ASC", - SortOrder::Desc => "DESC", - } - } -} - -/// A typed struct representing the full output of the GEORADIUS (or similar) command. -#[derive(Clone, Debug)] -pub struct GeoRadiusInfo { - pub member: RedisValue, - pub position: Option, - pub distance: Option, - pub hash: Option, -} - -impl Default for GeoRadiusInfo { - fn default() -> Self { - GeoRadiusInfo { - member: RedisValue::Null, - position: None, - distance: None, - hash: None, - } - } -} - -impl PartialEq for GeoRadiusInfo { - fn eq(&self, other: &Self) -> bool { - self.member == other.member - && self.position == other.position - && self.hash == other.hash - && utils::f64_opt_eq(&self.distance, &other.distance) - } -} - -impl Eq for GeoRadiusInfo {} - -/// Flags for the SCRIPT DEBUG command. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ScriptDebugFlag { - Yes, - No, - Sync, -} - -impl ScriptDebugFlag { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - ScriptDebugFlag::Yes => "YES", - ScriptDebugFlag::No => "NO", - ScriptDebugFlag::Sync => "SYNC", - } - } -} - -/// Location flag for the `LINSERT` command. 
-#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ListLocation { - Before, - After, -} - -impl ListLocation { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - ListLocation::Before => "BEFORE", - ListLocation::After => "AFTER", - } - } -} - -/// Ordering options for the ZADD (and related) commands. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum Ordering { - GreaterThan, - LessThan, -} - -impl Ordering { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - Ordering::GreaterThan => "GT", - Ordering::LessThan => "LT", - } - } -} - -/// Options for the ZRANGE (and related) commands. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ZSort { - ByScore, - ByLex, -} - -impl ZSort { - pub(crate) fn to_str(&self) -> &'static str { - match *self { - ZSort::ByScore => "BYSCORE", - ZSort::ByLex => "BYLEX", - } - } -} - -/// An index, score, lexicographical, or +|-|+inf|-inf range bound for the ZRANGE command. -#[derive(Clone, Debug)] -pub enum ZRangeBound { - /// Index ranges () - Index(i64), - /// Score ranges () - Score(f64), - /// Lexicographical ranges () - Lex(String), - /// Shortcut for the `+` character. - InfiniteLex, - /// Shortcut for the `-` character. - NegInfinityLex, - /// Shortcut for the `+inf` range bound. - InfiniteScore, - /// Shortcut for the `-inf` range bound. 
- NegInfiniteScore, -} - -impl From for ZRangeBound { - fn from(i: i64) -> Self { - ZRangeBound::Index(i) - } -} - -impl<'a> From<&'a str> for ZRangeBound { - fn from(s: &'a str) -> Self { - if s == "+inf" { - ZRangeBound::InfiniteScore - } else if s == "-inf" { - ZRangeBound::NegInfiniteScore - } else { - ZRangeBound::Lex(s.to_owned()) - } - } -} - -impl From for ZRangeBound { - fn from(s: String) -> Self { - if s == "+inf" { - ZRangeBound::InfiniteScore - } else if s == "-inf" { - ZRangeBound::NegInfiniteScore - } else { - ZRangeBound::Lex(s) - } - } -} - -impl<'a> From<&'a String> for ZRangeBound { - fn from(s: &'a String) -> Self { - s.as_str().into() - } -} - -impl TryFrom for ZRangeBound { - type Error = RedisError; - - fn try_from(f: f64) -> Result { - let value = if f.is_infinite() && f.is_sign_negative() { - ZRangeBound::NegInfiniteScore - } else if f.is_infinite() { - ZRangeBound::InfiniteScore - } else if f.is_nan() { - return Err(RedisError::new( - RedisErrorKind::Unknown, - "Cannot use NaN as zrange field.", - )); - } else { - ZRangeBound::Score(f) - }; - - Ok(value) - } -} - -/// The type of range interval bound. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ZRangeKind { - Inclusive, - Exclusive, -} - -impl Default for ZRangeKind { - fn default() -> Self { - ZRangeKind::Inclusive - } -} - -/// A wrapper struct for a range bound in a sorted set command. 
-#[derive(Clone, Debug)] -pub struct ZRange { - pub kind: ZRangeKind, - pub range: ZRangeBound, -} - -impl ZRange { - pub(crate) fn into_value(self) -> Result { - let value = if self.kind == ZRangeKind::Exclusive { - match self.range { - ZRangeBound::Index(i) => format!("({}", i).into(), - ZRangeBound::Score(f) => utils::f64_to_zrange_bound(f, &self.kind)?.into(), - ZRangeBound::Lex(s) => utils::check_lex_str(s, &self.kind).into(), - ZRangeBound::InfiniteLex => "+".into(), - ZRangeBound::NegInfinityLex => "-".into(), - ZRangeBound::InfiniteScore => "+inf".into(), - ZRangeBound::NegInfiniteScore => "-inf".into(), - } - } else { - match self.range { - ZRangeBound::Index(i) => i.into(), - ZRangeBound::Score(f) => f.try_into()?, - ZRangeBound::Lex(s) => utils::check_lex_str(s, &self.kind).into(), - ZRangeBound::InfiniteLex => "+".into(), - ZRangeBound::NegInfinityLex => "-".into(), - ZRangeBound::InfiniteScore => "+inf".into(), - ZRangeBound::NegInfiniteScore => "-inf".into(), - } - }; - - Ok(value) - } -} - -impl From for ZRange { - fn from(i: i64) -> Self { - ZRange { - kind: ZRangeKind::default(), - range: i.into(), - } - } -} - -impl<'a> From<&'a str> for ZRange { - fn from(s: &'a str) -> Self { - ZRange { - kind: ZRangeKind::default(), - range: s.into(), - } - } -} - -impl From for ZRange { - fn from(s: String) -> Self { - ZRange { - kind: ZRangeKind::default(), - range: s.into(), - } - } -} - -impl<'a> From<&'a String> for ZRange { - fn from(s: &'a String) -> Self { - ZRange { - kind: ZRangeKind::default(), - range: s.as_str().into(), - } - } -} - -impl TryFrom for ZRange { - type Error = RedisError; - - fn try_from(f: f64) -> Result { - Ok(ZRange { - kind: ZRangeKind::default(), - range: f.try_into()?, - }) - } -} - -/// Arguments for the `SENTINEL SIMULATE-FAILURE` command. 
-#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg(feature = "sentinel-client")] -#[cfg_attr(docsrs, doc(cfg(feature = "sentinel-client")))] -pub enum SentinelFailureKind { - CrashAfterElection, - CrashAfterPromotion, - Help, -} - -#[cfg(feature = "sentinel-client")] -impl SentinelFailureKind { - pub(crate) fn to_str(&self) -> &'static str { - match self { - SentinelFailureKind::CrashAfterElection => "crash-after-election", - SentinelFailureKind::CrashAfterPromotion => "crash-after-promotion", - SentinelFailureKind::Help => "help", - } - } -} - -impl<'a> From<&'a ZRange> for ZRange { - fn from(range: &'a ZRange) -> Self { - range.clone() - } -} - -/// A trait that can be used to override DNS resolution logic for a client. -/// -/// Note: using this requires [async-trait](https://crates.io/crates/async-trait). -// TODO expose this to callers so they can do their own DNS resolution -#[async_trait] -pub(crate) trait Resolve: Send + Sync + 'static { - /// Resolve a hostname. - async fn resolve(&self, host: String, port: u16) -> Result; -} diff --git a/src/monitor/parser.rs b/src/monitor/parser.rs index e57d08e6..6f685449 100644 --- a/src/monitor/parser.rs +++ b/src/monitor/parser.rs @@ -7,7 +7,7 @@ use nom::combinator::{map_res as nom_map_res, opt as nom_opt}; use nom::multi::many0 as nom_many0; use nom::sequence::{delimited as nom_delimited, preceded as nom_preceded, terminated as nom_terminated}; use nom::IResult; -use redis_protocol::resp2::types::Frame as ProtocolFrame; +use redis_protocol::resp3::types::Frame as Resp3Frame; use redis_protocol::types::RedisParseError; use std::str; use std::sync::Arc; @@ -29,9 +29,9 @@ fn to_u8(s: &str) -> Result> { fn to_redis_value(s: &[u8]) -> Result> { // TODO make this smarter in the future if let Ok(value) = str::from_utf8(s) { - Ok(RedisValue::String(value.to_owned())) + Ok(RedisValue::String(value.into())) } else { - Ok(RedisValue::Bytes(s.to_vec())) + Ok(RedisValue::Bytes(s.to_vec().into())) } } @@ -120,10 +120,11 @@ fn 
log_frame(inner: &Arc, frame: &[u8]) { #[cfg(not(feature = "network-logs"))] fn log_frame(_: &Arc, _: &[u8]) {} -pub fn parse(inner: &Arc, frame: ProtocolFrame) -> Option { +pub fn parse(inner: &Arc, frame: Resp3Frame) -> Option { let frame_bytes = match frame { - ProtocolFrame::SimpleString(ref s) => s.as_bytes(), - ProtocolFrame::BulkString(ref b) => b, + Resp3Frame::SimpleString { ref data, .. } => data, + Resp3Frame::BlobString { ref data, .. } => data, + Resp3Frame::VerbatimString { ref data, .. } => data, _ => { _warn!(inner, "Unexpected frame type on monitor stream: {:?}", frame.kind()); return None; diff --git a/src/monitor/utils.rs b/src/monitor/utils.rs index cfb1d4fd..481c2d34 100644 --- a/src/monitor/utils.rs +++ b/src/monitor/utils.rs @@ -4,11 +4,10 @@ use crate::monitor::parser; use crate::monitor::{Command, Config}; use crate::protocol::codec::RedisCodec; use crate::protocol::connection::{self, RedisTransport}; -use crate::protocol::types::{RedisCommand, RedisCommandKind}; +use crate::protocol::types::{ProtocolFrame, RedisCommand, RedisCommandKind}; use crate::protocol::utils as protocol_utils; use crate::types::{RedisConfig, ServerConfig}; use futures::stream::{Stream, StreamExt}; -use redis_protocol::resp2::types::Frame as ProtocolFrame; use std::sync::Arc; use tokio::io::{AsyncRead, AsyncWrite}; use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; @@ -24,13 +23,13 @@ async fn handle_monitor_frame( frame: Result, ) -> Option { let frame = match frame { - Ok(frame) => frame, + Ok(frame) => frame.into_resp3(), Err(e) => { _error!(inner, "Error on monitor stream: {:?}", e); return None; } }; - let frame_size = protocol_utils::frame_size(&frame); + let frame_size = protocol_utils::resp3_frame_size(&frame); if frame_size >= globals().blocking_encode_threshold() { // since this isn't called from the Encoder/Decoder trait we can use spawn_blocking here @@ -56,7 +55,7 @@ async fn handle_monitor_frame( frame: Result, ) -> Option { let frame = match 
frame { - Ok(frame) => frame, + Ok(frame) => frame.into_resp3(), Err(e) => { _error!(inner, "Error on monitor stream: {:?}", e); return None; @@ -106,12 +105,12 @@ async fn send_monitor_command( let command = RedisCommand::new(RedisCommandKind::Monitor, vec![], None); let (frame, connection) = match connection { RedisTransport::Tcp(framed) => { - let (frame, framed) = connection::request_response(framed, &command).await?; - (frame, RedisTransport::Tcp(framed)) + let (frame, framed) = connection::request_response(framed, &command, false).await?; + (frame.into_resp3(), RedisTransport::Tcp(framed)) } RedisTransport::Tls(framed) => { - let (frame, framed) = connection::request_response(framed, &command).await?; - (frame, RedisTransport::Tls(framed)) + let (frame, framed) = connection::request_response(framed, &command, false).await?; + (frame.into_resp3(), RedisTransport::Tls(framed)) } }; diff --git a/src/multiplexer/commands.rs b/src/multiplexer/commands.rs index eaa2081b..538355dc 100644 --- a/src/multiplexer/commands.rs +++ b/src/multiplexer/commands.rs @@ -1,6 +1,5 @@ -use crate::client::RedisClient; +use crate::clients::RedisClient; use crate::error::{RedisError, RedisErrorKind}; -use crate::globals::globals; use crate::modules::inner::RedisClientInner; use crate::multiplexer::{utils, SentCommand}; use crate::multiplexer::{Backpressure, Multiplexer}; @@ -11,7 +10,7 @@ use crate::trace; use crate::types::{ClientState, ReconnectPolicy, ServerConfig}; use crate::utils as client_utils; use redis_protocol::redis_keyslot; -use redis_protocol::resp2::types::Frame as ProtocolFrame; +use redis_protocol::resp3::types::Frame as Resp3Frame; use std::collections::VecDeque; use std::ops::DerefMut; use std::sync::Arc; @@ -197,8 +196,12 @@ fn next_reconnect_delay( error: &RedisError, ) -> Option { if error.is_cluster_error() { - let amt = globals().cluster_error_cache_delay(); - _debug!(inner, "Waiting {} ms to reconnect due to cluster error", amt); + let amt = 
inner.perf_config.cluster_cache_update_delay_ms(); + _debug!( + inner, + "Waiting {} ms to rebuild cluster state due to cluster error", + amt + ); Some(amt as u64) } else { policy.next_delay() @@ -217,7 +220,8 @@ fn handle_connection_closed( let mut policy = policy.unwrap_or(ReconnectPolicy::Constant { attempts: 0, max_attempts: 0, - delay: globals().cluster_error_cache_delay() as u32, + delay: inner.perf_config.cluster_cache_update_delay_ms() as u32, + jitter: 0, }); let reconnect_inner = inner.clone(); @@ -258,8 +262,10 @@ fn handle_connection_closed( } }; - _info!(inner, "Sleeping for {} ms before reconnecting", next_delay); - sleep(Duration::from_millis(next_delay)).await; + if next_delay > 0 { + _info!(inner, "Sleeping for {} ms before reconnecting", next_delay); + sleep(Duration::from_millis(next_delay)).await; + } let result = if client_utils::is_clustered(&inner.config) { multiplexer.sync_cluster().await @@ -363,7 +369,9 @@ fn should_disable_pipeline(inner: &Arc, command: &RedisCommand // when the final response to EXEC or DISCARD arrives the command buffer will only contain commands that were // a part of the transaction. this makes reconnection logic much easier to reason about in the context of transactions || command.kind == RedisCommandKind::Multi - || command.kind.ends_transaction(); + || command.kind.ends_transaction() + // we also disable pipelining on the HELLO command so that we don't try to decode any in-flight responses with the wrong codec logic + || command.kind.is_hello(); // prefer pipelining for all commands not in a multi block (unless specified above), unless the command is blocking. // but, in the context of a transaction blocking commands can be pipelined since the server responds immediately. 
@@ -376,7 +384,7 @@ fn check_transaction_hash_slot(inner: &Arc, command: &RedisCom if client_utils::is_clustered(&inner.config) && client_utils::is_locked_some(&inner.multi_block) { if let Some(key) = command.extract_key() { if let Some(policy) = inner.multi_block.write().deref_mut() { - let _ = policy.check_and_set_hash_slot(redis_keyslot(&key))?; + let _ = policy.check_and_set_hash_slot(redis_keyslot(key))?; } } } @@ -411,14 +419,14 @@ async fn handle_write_error( /// Handle the response to the MULTI command, forwarding any errors onto the caller of the next command and returning whether the multiplexers should skip the next command. async fn handle_deferred_multi_response( inner: &Arc, - rx: OneshotReceiver>, + rx: OneshotReceiver>, command: &mut RedisCommand, ) -> bool { match rx.await { Ok(Ok(frame)) => { - if let ProtocolFrame::Error(s) = frame { + if let Resp3Frame::SimpleError { data, .. } = frame { if let Some(tx) = command.tx.take() { - let _ = tx.send(Err(pretty_error(&s))); + let _ = tx.send(Err(pretty_error(&data))); } true } else { @@ -845,6 +853,9 @@ pub async fn init(inner: &Arc, mut policy: Option, mut policy: Option bool { - client_utils::read_atomic(&self.feed_count) > globals().feed_count() + pub fn should_send(&self, inner: &Arc) -> bool { + client_utils::read_atomic(&self.feed_count) > inner.perf_config.max_feed_count() || client_utils::read_atomic(&self.cmd_buffer_len) == 0 } diff --git a/src/multiplexer/responses.rs b/src/multiplexer/responses.rs index 3a1e8d97..d61eeb8c 100644 --- a/src/multiplexer/responses.rs +++ b/src/multiplexer/responses.rs @@ -2,15 +2,15 @@ use crate::error::{RedisError, RedisErrorKind}; use crate::modules::inner::RedisClientInner; use crate::multiplexer::utils; use crate::multiplexer::{Counters, SentCommand, SentCommands}; -use crate::protocol::types::RedisCommandKind; -use crate::protocol::types::{ResponseKind, ValueScanInner, ValueScanResult}; +use crate::protocol::types::{RedisCommandKind, ResponseKind, 
ValueScanInner, ValueScanResult}; use crate::protocol::utils as protocol_utils; use crate::protocol::utils::{frame_to_error, frame_to_single_result}; use crate::trace; use crate::types::{HScanResult, KeyspaceEvent, RedisKey, RedisValue, SScanResult, ScanResult, ZScanResult}; use crate::utils as client_utils; +use bytes_utils::Str; use parking_lot::{Mutex, RwLock}; -use redis_protocol::resp2::types::Frame as ProtocolFrame; +use redis_protocol::resp3::types::Frame as Resp3Frame; use std::collections::{BTreeMap, BTreeSet, VecDeque}; use std::sync::Arc; @@ -53,11 +53,12 @@ fn sample_command_latencies(inner: &Arc, command: &mut SentCom fn sample_command_latencies(_: &Arc, _: &mut SentCommand) {} /// Merge multiple potentially nested frames into one flat array of frames. -fn merge_multiple_frames(frames: &mut VecDeque) -> ProtocolFrame { +fn merge_multiple_frames(frames: &mut VecDeque) -> Resp3Frame { let inner_len = frames.iter().fold(0, |count, frame| { count + match frame { - ProtocolFrame::Array(ref inner) => inner.len(), + Resp3Frame::Array { ref data, .. } => data.len(), + Resp3Frame::Push { ref data, .. } => data.len(), _ => 1, } }); @@ -66,8 +67,8 @@ fn merge_multiple_frames(frames: &mut VecDeque) -> ProtocolFrame for frame in frames.drain(..) { match frame { - ProtocolFrame::Array(inner) => { - for inner_frame in inner.into_iter() { + Resp3Frame::Array { data, .. } | Resp3Frame::Push { data, .. } => { + for inner_frame in data.into_iter() { out.push(inner_frame); } } @@ -75,11 +76,14 @@ fn merge_multiple_frames(frames: &mut VecDeque) -> ProtocolFrame }; } - ProtocolFrame::Array(out) + Resp3Frame::Array { + data: out, + attributes: None, + } } /// Update the SCAN cursor on a command, changing the internal cursor and the arguments array for the next call to SCAN. 
-fn update_scan_cursor(inner: &Arc, last_command: &mut SentCommand, cursor: String) { +fn update_scan_cursor(inner: &Arc, last_command: &mut SentCommand, cursor: Str) { if last_command.command.kind.is_scan() { last_command.command.args[0] = cursor.clone().into(); } else if last_command.command.kind.is_value_scan() { @@ -101,10 +105,10 @@ fn update_scan_cursor(inner: &Arc, last_command: &mut SentComm } /// Parse the output of a command that scans keys. -fn handle_key_scan_result(frame: ProtocolFrame) -> Result<(String, Vec), RedisError> { - if let ProtocolFrame::Array(mut frames) = frame { - if frames.len() == 2 { - let cursor = match frames[0].to_string() { +fn handle_key_scan_result(frame: Resp3Frame) -> Result<(Str, Vec), RedisError> { + if let Resp3Frame::Array { mut data, .. } = frame { + if data.len() == 2 { + let cursor = match protocol_utils::frame_to_str(&data[0]) { Some(s) => s, None => { return Err(RedisError::new( @@ -114,11 +118,11 @@ fn handle_key_scan_result(frame: ProtocolFrame) -> Result<(String, Vec } }; - if let Some(ProtocolFrame::Array(results)) = frames.pop() { - let mut keys = Vec::with_capacity(results.len()); + if let Some(Resp3Frame::Array { data, .. }) = data.pop() { + let mut keys = Vec::with_capacity(data.len()); - for frame in results.into_iter() { - let key = match frame.to_string() { + for frame in data.into_iter() { + let key = match protocol_utils::frame_to_bytes(&frame) { Some(s) => s, None => { return Err(RedisError::new( @@ -128,7 +132,7 @@ fn handle_key_scan_result(frame: ProtocolFrame) -> Result<(String, Vec } }; - keys.push(RedisKey::new(key)); + keys.push(key.into()); } Ok((cursor, keys)) @@ -153,10 +157,10 @@ fn handle_key_scan_result(frame: ProtocolFrame) -> Result<(String, Vec } /// Parse the output of a command that scans values. 
-fn handle_value_scan_result(frame: ProtocolFrame) -> Result<(String, Vec), RedisError> { - if let ProtocolFrame::Array(mut frames) = frame { - if frames.len() == 2 { - let cursor = match frames[0].to_string() { +fn handle_value_scan_result(frame: Resp3Frame) -> Result<(Str, Vec), RedisError> { + if let Resp3Frame::Array { mut data, .. } = frame { + if data.len() == 2 { + let cursor = match protocol_utils::frame_to_str(&data[0]) { Some(s) => s, None => { return Err(RedisError::new( @@ -166,10 +170,10 @@ fn handle_value_scan_result(frame: ProtocolFrame) -> Result<(String, Vec, event: KeyspaceEvent) { } /// Respond to the caller with the output of the command. -fn respond_to_caller(inner: &Arc, last_command: SentCommand, frame: ProtocolFrame) { +fn respond_to_caller(inner: &Arc, last_command: SentCommand, frame: Resp3Frame) { _trace!( inner, "Responding to caller for {}", @@ -437,7 +441,7 @@ fn respond_to_caller_error(inner: &Arc, last_command: SentComm async fn handle_all_nodes_response( inner: &Arc, last_command: SentCommand, - frame: ProtocolFrame, + frame: Resp3Frame, ) -> Option { if let Some(resp) = last_command.command.kind.all_nodes_response() { if frame.is_error() { @@ -456,6 +460,11 @@ async fn handle_all_nodes_response( if resp.decr_num_nodes() == 0 { check_command_resp_tx(inner, &last_command).await; + // if the client sent HELLO to all nodes then wait for the last response to arrive before changing the protocol version + if last_command.command.kind.is_hello() { + update_protocol_version(inner, &last_command, &frame); + } + // take the final response sender off the command and write to that if let Some(tx) = resp.take_tx() { _trace!(inner, "Sending all nodes response after recv all responses."); @@ -480,6 +489,70 @@ async fn handle_all_nodes_response( None } +/// Handle a response frame from a command that expects multiple top-level response frames, such as PSUBSCRIBE. 
+async fn handle_multiple_responses( + inner: &Arc, + mut last_command: SentCommand, + frame: Resp3Frame, +) -> Result, RedisError> { + let frames = match last_command.command.kind.response_kind_mut() { + Some(kind) => { + if let ResponseKind::Multiple { + ref count, + ref mut buffer, + } = kind + { + buffer.push_back(frame); + + if buffer.len() < *count { + _trace!( + inner, + "Waiting for {} more frames for request with multiple responses.", + count - buffer.len() + ); + None + } else { + _trace!(inner, "Merging {} frames into one response.", buffer.len()); + Some(merge_multiple_frames(buffer)) + } + } else { + _warn!(inner, "Invalid command response kind. Expected multiple responses."); + return Ok(None); + } + } + None => { + _warn!( + inner, + "Failed to read multiple response kind. Dropping response frame..." + ); + return Ok(None); + } + }; + + if let Some(frames) = frames { + check_command_resp_tx(inner, &last_command).await; + respond_to_caller(inner, last_command, frames); + Ok(None) + } else { + // more responses are expected so return the last command to be put back in the queue + Ok(Some(last_command)) + } +} + +/// Update the client's protocol version codec version after receiving a non-error response to HELLO. +fn update_protocol_version(inner: &Arc, last_command: &SentCommand, frame: &Resp3Frame) { + if !frame.is_error() { + let version = match last_command.command.kind { + RedisCommandKind::Hello(ref version) => version, + RedisCommandKind::_HelloAllCluster((_, ref version)) => version, + _ => return, + }; + + // HELLO cannot be pipelined so this is safe + inner.switch_protocol_versions(version.clone()); + } +} + /// Process the frame in the context of the last (oldest) command sent. /// /// If the last command has more expected responses it will be returned so it can be put back on the front of the response queue. 
@@ -488,7 +561,7 @@ async fn process_response( server: &Arc, counters: &Counters, mut last_command: SentCommand, - frame: ProtocolFrame, + frame: Resp3Frame, ) -> Result, RedisError> { _trace!( inner, @@ -499,47 +572,11 @@ async fn process_response( ); if last_command.command.kind.has_multiple_response_kind() { - let frames = match last_command.command.kind.response_kind_mut() { - Some(kind) => { - if let ResponseKind::Multiple { - ref count, - ref mut buffer, - } = kind - { - buffer.push_back(frame); - - if buffer.len() < *count { - _trace!( - inner, - "Waiting for {} more frames for request with multiple responses.", - count - buffer.len() - ); - None - } else { - _trace!(inner, "Merging {} frames into one response.", buffer.len()); - Some(merge_multiple_frames(buffer)) - } - } else { - _warn!(inner, "Invalid command response kind. Expected multiple responses."); - return Ok(None); - } - } - None => { - _warn!( - inner, - "Failed to read multiple response kind. Dropping response frame..." - ); - return Ok(None); - } - }; - - if let Some(frames) = frames { - check_command_resp_tx(inner, &last_command).await; - respond_to_caller(inner, last_command, frames); - } else { - // more responses are expected so return the last command to be put back in the queue - return Ok(Some(last_command)); - } + // one assumption this makes, which might not be true, is that in cases where multiple responses are sent in separate top-level response frames, + // such as PSUBSCRIBE, that those frames will all arrive without any other command responses interleaved in the middle. i _think_ this is the case, + // but there's a chance it's not when the client is pipelined. if this is not true then i'm not sure what to do here other than to make these + // types of commands non-pipelined in all cases, since there's no mechanism in the protocol to associate out-of-order responses. 
+ return handle_multiple_responses(inner, last_command, frame).await; } else if last_command.command.kind.is_scan() { client_utils::decr_atomic(&counters.in_flight); @@ -551,7 +588,7 @@ async fn process_response( return Ok(None); } }; - let should_stop = next_cursor.as_str() == LAST_CURSOR; + let should_stop = next_cursor == LAST_CURSOR; update_scan_cursor(inner, &mut last_command, next_cursor); check_command_resp_tx(inner, &last_command).await; @@ -570,7 +607,7 @@ async fn process_response( return Ok(None); } }; - let should_stop = next_cursor.as_str() == LAST_CURSOR; + let should_stop = next_cursor == LAST_CURSOR; update_scan_cursor(inner, &mut last_command, next_cursor); check_command_resp_tx(inner, &last_command).await; @@ -584,6 +621,11 @@ async fn process_response( client_utils::decr_atomic(&counters.in_flight); sample_command_latencies(inner, &mut last_command); + // update the protocol version after a non-error response is received from HELLO + if last_command.command.kind.is_hello() { + update_protocol_version(inner, &last_command, &frame); + } + check_command_resp_tx(inner, &last_command).await; respond_to_caller(inner, last_command, frame); } @@ -641,51 +683,98 @@ fn parse_keyspace_notification(channel: String, message: RedisValue) -> Result, frame: ProtocolFrame) -> Option { +/// Check for the various pubsub formats for both RESP2 and RESP3. 
+fn check_pubsub_formats(frame: &Resp3Frame) -> (bool, bool) { if frame.is_pubsub_message() { - let span = if inner.should_trace() { - let span = trace::create_pubsub_span(inner, &frame); - Some(span) - } else { - None - }; + return (true, false); + } - _trace!(inner, "Processing pubsub message."); - let parsed_frame = if let Some(ref span) = span { - let _enter = span.enter(); - protocol_utils::frame_to_pubsub(frame) - } else { - protocol_utils::frame_to_pubsub(frame) - }; + // otherwise check for RESP2 formats automatically converted to RESP3 by the codec + let data = match frame { + Resp3Frame::Array { ref data, .. } => data, + Resp3Frame::Push { ref data, .. } => data, + _ => return (false, false), + }; - let (channel, message) = match parsed_frame { - Ok(data) => data, - Err(err) => { - _warn!(inner, "Invalid message on pubsub interface: {:?}", err); - return None; - } - }; - if let Some(ref span) = span { - span.record("channel", &channel.as_str()); - } + // RESP2 and RESP3 differ in that RESP3 contains an additional "pubsub" string frame at the start + // so here we check the frame contents according to the RESP2 pubsub rules + ( + false, + (data.len() == 3 || data.len() == 4) + && data[0] + .as_str() + .map(|s| s == "message" || s == "pmessage") + .unwrap_or(false), + ) +} - match parse_keyspace_notification(channel, message) { - Ok(event) => emit_keyspace_event(inner, event), - Err((channel, message)) => emit_pubsub_message(inner, channel, message), - }; +/// Try to parse the frame in either RESP2 or RESP3 pubsub formats. 
+fn parse_pubsub_message( + frame: Resp3Frame, + is_resp3: bool, + is_resp2: bool, +) -> Result<(String, RedisValue), RedisError> { + if is_resp3 { + protocol_utils::frame_to_pubsub(frame) + } else if is_resp2 { + // this is safe to do in limited circumstances like this since RESP2 and RESP3 pubsub arrays are similar enough + protocol_utils::parse_as_resp2_pubsub(frame) + } else { + Err(RedisError::new( + RedisErrorKind::ProtocolError, + "Invalid pubsub message.", + )) + } +} + +/// Check if the frame is part of a pubsub message, and if so route it to any listeners. +/// +/// If not then return it to the caller for further processing. +fn check_pubsub_message(inner: &Arc, frame: Resp3Frame) -> Option { + // in this case using resp3 frames can cause issues, since resp3 push commands are represented + // differently than resp2 array frames. to fix this we convert back to resp2 here if needed. + let (is_resp3_pubsub, is_resp2_pubsub) = check_pubsub_formats(&frame); + if !is_resp3_pubsub && !is_resp2_pubsub { + return Some(frame); + } + let span = if inner.should_trace() { + let span = trace::create_pubsub_span(inner, &frame); + Some(span) + } else { None + }; + + _trace!(inner, "Processing pubsub message."); + let parsed_frame = if let Some(ref span) = span { + let _enter = span.enter(); + parse_pubsub_message(frame, is_resp3_pubsub, is_resp2_pubsub) } else { - Some(frame) + parse_pubsub_message(frame, is_resp3_pubsub, is_resp2_pubsub) + }; + + let (channel, message) = match parsed_frame { + Ok(data) => data, + Err(err) => { + _warn!(inner, "Invalid message on pubsub interface: {:?}", err); + return None; + } + }; + if let Some(ref span) = span { + span.record("channel", &channel.as_str()); } + + match parse_keyspace_notification(channel, message) { + Ok(event) => emit_keyspace_event(inner, event), + Err((channel, message)) => emit_pubsub_message(inner, channel, message), + }; + + None } #[cfg(feature = "reconnect-on-auth-error")] /// Parse the response frame to see 
if it's an auth error. -fn parse_redis_auth_error(frame: &ProtocolFrame) -> Option { +fn parse_redis_auth_error(frame: &Resp3Frame) -> Option { if frame.is_error() { match frame_to_single_result(frame.clone()) { Ok(_) => None, @@ -701,7 +790,7 @@ fn parse_redis_auth_error(frame: &ProtocolFrame) -> Option { #[cfg(not(feature = "reconnect-on-auth-error"))] /// Parse the response frame to see if it's an auth error. -fn parse_redis_auth_error(_frame: &ProtocolFrame) -> Option { +fn parse_redis_auth_error(_frame: &Resp3Frame) -> Option { None } @@ -792,9 +881,9 @@ fn last_clustered_command_ends_transaction( } /// Whether or not the response is a QUEUED response to a command within a transaction. -fn response_is_queued(frame: &ProtocolFrame) -> bool { +fn response_is_queued(frame: &Resp3Frame) -> bool { match frame { - ProtocolFrame::SimpleString(ref s) => s == "QUEUED", + Resp3Frame::SimpleString { ref data, .. } => data == "QUEUED", _ => false, } } @@ -845,7 +934,7 @@ async fn end_centralized_multi_block( inner: &Arc, counters: &Counters, commands: &Arc>, - frame: ProtocolFrame, + frame: Resp3Frame, ending_cmd: TransactionEnded, ) -> Result<(), RedisError> { if !client_utils::is_locked_some(&inner.multi_block) { @@ -856,7 +945,8 @@ async fn end_centralized_multi_block( } counters.decr_in_flight(); - if ending_cmd == TransactionEnded::Discard || (ending_cmd == TransactionEnded::Exec && frame.is_null()) { + let frame_is_null = protocol_utils::is_null(&frame); + if ending_cmd == TransactionEnded::Discard || (ending_cmd == TransactionEnded::Exec && frame_is_null) { // the transaction was discarded or aborted due to a WATCH condition failing _trace!(inner, "Ending transaction with discard or null response"); let recent_cmd = take_most_recent_centralized_command(commands); @@ -910,7 +1000,7 @@ async fn end_clustered_multi_block( server: &Arc, counters: &Arc, Counters>>>, commands: &Arc, SentCommands>>>, - frame: ProtocolFrame, + frame: Resp3Frame, ending_cmd: 
TransactionEnded, ) -> Result<(), RedisError> { if !client_utils::is_locked_some(&inner.multi_block) { @@ -923,7 +1013,8 @@ async fn end_clustered_multi_block( counters.decr_in_flight(); } - if ending_cmd == TransactionEnded::Discard || (ending_cmd == TransactionEnded::Exec && frame.is_null()) { + let frame_is_null = protocol_utils::is_null(&frame); + if ending_cmd == TransactionEnded::Discard || (ending_cmd == TransactionEnded::Exec && frame_is_null) { // the transaction was discarded or aborted due to a WATCH condition failing _trace!(inner, "Ending transaction with discard or null response."); let recent_cmd = take_most_recent_cluster_command(commands, server); @@ -975,7 +1066,7 @@ async fn handle_clustered_queued_response( server: &Arc, counters: &Arc, Counters>>>, commands: &Arc, VecDeque>>>, - frame: ProtocolFrame, + frame: Resp3Frame, ) -> Result<(), RedisError> { let multi_block = match client_utils::read_locked(&inner.multi_block) { Some(blk) => blk, @@ -1018,7 +1109,7 @@ async fn handle_centralized_queued_response( inner: &Arc, counters: &Counters, commands: &Arc>, - frame: ProtocolFrame, + frame: Resp3Frame, ) -> Result<(), RedisError> { let multi_block = match client_utils::read_locked(&inner.multi_block) { Some(blk) => blk, @@ -1057,7 +1148,7 @@ async fn handle_centralized_queued_response( } /// Check if the frame represents a MOVED or ASK error. 
-fn check_redirection_error(inner: &Arc, frame: &ProtocolFrame) -> Option { +fn check_redirection_error(inner: &Arc, frame: &Resp3Frame) -> Option { if frame.is_moved_or_ask_error() { let error = frame_to_error(frame).unwrap_or(RedisError::new(RedisErrorKind::Cluster, "MOVED or ASK error.")); utils::emit_error(&inner, &error); @@ -1069,12 +1160,12 @@ fn check_redirection_error(inner: &Arc, frame: &ProtocolFrame) } #[cfg(feature = "custom-reconnect-errors")] -fn check_global_reconnect_errors(inner: &Arc, frame: &ProtocolFrame) -> Option { - if let ProtocolFrame::Error(ref message) = frame { +fn check_global_reconnect_errors(inner: &Arc, frame: &Resp3Frame) -> Option { + if let Resp3Frame::SimpleError { ref data, .. } = frame { for prefix in globals().reconnect_errors.read().iter() { - if message.starts_with(prefix.to_str()) { - _warn!(inner, "Found reconnection error: {}", message); - let error = protocol_utils::pretty_error(message); + if data.starts_with(prefix.to_str()) { + _warn!(inner, "Found reconnection error: {}", data); + let error = protocol_utils::pretty_error(data); utils::emit_error(inner, &error); return Some(error); } @@ -1087,12 +1178,12 @@ fn check_global_reconnect_errors(inner: &Arc, frame: &Protocol } #[cfg(not(feature = "custom-reconnect-errors"))] -fn check_global_reconnect_errors(_: &Arc, _: &ProtocolFrame) -> Option { +fn check_global_reconnect_errors(_: &Arc, _: &Resp3Frame) -> Option { None } /// Check for special errors configured by the caller to initiate a reconnection process. 
-fn check_special_errors(inner: &Arc, frame: &ProtocolFrame) -> Option { +fn check_special_errors(inner: &Arc, frame: &Resp3Frame) -> Option { if let Some(auth_error) = parse_redis_auth_error(frame) { // this closes the stream and initiates a reconnect, if applicable return Some(auth_error); @@ -1124,7 +1215,7 @@ pub async fn process_clustered_frame( server: &Arc, counters: &Arc, Counters>>>, commands: &Arc, VecDeque>>>, - frame: ProtocolFrame, + frame: Resp3Frame, ) -> Result<(), RedisError> { if let Some(error) = check_redirection_error(inner, &frame) { handle_redirection_error(inner, server, commands, error)?; @@ -1182,7 +1273,7 @@ pub async fn process_centralized_frame( server: &Arc, counters: &Counters, commands: &Arc>, - frame: ProtocolFrame, + frame: Resp3Frame, ) -> Result<(), RedisError> { if let Some(error) = check_special_errors(inner, &frame) { // this closes the stream and initiates a reconnect, if configured @@ -1195,7 +1286,6 @@ pub async fn process_centralized_frame( return Ok(()); } - // TODO change this so we can check the last command without contending for a lock if let Some(trx_ended) = last_centralized_command_ends_transaction(commands).await { end_centralized_multi_block(inner, counters, commands, frame, trx_ended).await } else { diff --git a/src/multiplexer/sentinel.rs b/src/multiplexer/sentinel.rs index d36d4fd2..f056272c 100644 --- a/src/multiplexer/sentinel.rs +++ b/src/multiplexer/sentinel.rs @@ -1,12 +1,12 @@ use crate::error::{RedisError, RedisErrorKind}; use crate::globals::globals; use crate::modules::inner::RedisClientInner; -use crate::modules::types::ClientState; use crate::multiplexer::{utils, CloseTx, Connections, Counters, SentCommand}; use crate::protocol::codec::RedisCodec; -use crate::protocol::connection::{self, authenticate, FramedTcp, FramedTls, RedisTransport}; +use crate::protocol::connection::{self, authenticate, select_database, FramedTcp, FramedTls, RedisTransport}; use crate::protocol::types::{RedisCommand, 
RedisCommandKind}; use crate::protocol::utils as protocol_utils; +use crate::types::ClientState; use crate::types::Resolve; use crate::types::{RedisValue, ServerConfig}; use crate::utils as client_utils; @@ -88,16 +88,26 @@ pub async fn create_authenticated_connection_tls( let server = format!("{}:{}", addr.ip().to_string(), addr.port()); let codec = RedisCodec::new(inner, server); let client_name = inner.client_name(); - let (username, password) = if is_sentinel { - read_sentinel_auth(inner)? + let ((username, password), is_resp3) = if is_sentinel { + (read_sentinel_auth(inner)?, false) } else { - read_redis_auth(inner) + (read_redis_auth(inner), inner.is_resp3()) }; let socket = TcpStream::connect(addr).await?; let tls_stream = tls::create_tls_connector(&inner.config)?; let socket = tls_stream.connect(domain, socket).await?; - let framed = authenticate(Framed::new(socket, codec), &client_name, username, password).await?; + let framed = if is_sentinel { + Framed::new(socket, codec) + } else { + connection::switch_protocols(inner, Framed::new(socket, codec)).await? + }; + let framed = authenticate(framed, &client_name, username, password, is_resp3).await?; + let framed = if is_sentinel { + framed + } else { + select_database(inner, framed).await? + }; Ok(framed) } @@ -120,14 +130,24 @@ pub async fn create_authenticated_connection( let server = format!("{}:{}", addr.ip().to_string(), addr.port()); let codec = RedisCodec::new(inner, server); let client_name = inner.client_name(); - let (username, password) = if is_sentinel { - read_sentinel_auth(inner)? 
+ let ((username, password), is_resp3) = if is_sentinel { + (read_sentinel_auth(inner)?, false) } else { - read_redis_auth(inner) + (read_redis_auth(inner), inner.is_resp3()) }; let socket = TcpStream::connect(addr).await?; - let framed = authenticate(Framed::new(socket, codec), &client_name, username, password).await?; + let framed = if is_sentinel { + Framed::new(socket, codec) + } else { + connection::switch_protocols(inner, Framed::new(socket, codec)).await? + }; + let framed = authenticate(framed, &client_name, username, password, is_resp3).await?; + let framed = if is_sentinel { + framed + } else { + select_database(inner, framed).await? + }; Ok(framed) } @@ -166,8 +186,8 @@ async fn read_primary_node_address( vec!["get-master-addr-by-name".into(), server_name.into()], None, ); - let (frame, transport) = stry!(connection::transport_request_response(transport, &request).await); - let result = stry!(protocol_utils::frame_to_results(frame)); + let (frame, transport) = stry!(connection::transport_request_response(transport, &request, false).await); + let result = stry!(protocol_utils::frame_to_results(frame.into_resp3())); let (host, port): (String, u16) = stry!(result.convert()); let addr = stry!(inner.resolver.resolve(host.clone(), port).await); @@ -230,8 +250,9 @@ async fn connect_and_check_primary_role( let transport = stry!(connect_to_server(inner, host, addr, DEFAULT_CONNECTION_TIMEOUT_MS, false).await); _debug!(inner, "Checking role for redis server at {}:{}", host, addr.port()); - let (frame, transport) = stry!(connection::transport_request_response(transport, &request).await); - let result = stry!(protocol_utils::frame_to_results(frame)); + let is_resp3 = inner.is_resp3(); + let (frame, transport) = stry!(connection::transport_request_response(transport, &request, is_resp3).await); + let result = stry!(protocol_utils::frame_to_results(frame.into_resp3())); if let RedisValue::Array(values) = result { if let Some(first) = values.first() { @@ -407,8 +428,8 
@@ pub async fn update_sentinel_nodes( ) -> Result<(), RedisError> { _debug!(inner, "Reading sentinel nodes..."); let command = RedisCommand::new(RedisCommandKind::Sentinel, vec!["sentinels".into(), name.into()], None); - let (frame, _) = stry!(connection::transport_request_response(transport, &command).await); - let response = stry!(protocol_utils::frame_to_results(frame)); + let (frame, _) = stry!(connection::transport_request_response(transport, &command, false).await); + let response = stry!(protocol_utils::frame_to_results(frame.into_resp3())); _trace!(inner, "Read sentinel response: {:?}", response); let sentinel_nodes = stry!(parse_sentinel_nodes_response(inner, response)); diff --git a/src/multiplexer/utils.rs b/src/multiplexer/utils.rs index 0bc73aea..604d84e0 100644 --- a/src/multiplexer/utils.rs +++ b/src/multiplexer/utils.rs @@ -1,6 +1,5 @@ -use crate::client::RedisClient; +use crate::clients::RedisClient; use crate::error::{RedisError, RedisErrorKind}; -use crate::globals::globals; use crate::modules::inner::{ClosedState, RedisClientInner}; use crate::multiplexer::types::ClusterChange; use crate::multiplexer::{responses, Multiplexer}; @@ -8,6 +7,7 @@ use crate::multiplexer::{Backpressure, CloseTx, Connections, Counters, SentComma use crate::protocol::connection::{self, RedisSink, RedisStream}; use crate::protocol::types::*; use crate::protocol::utils as protocol_utils; +use crate::protocol::utils::server_to_parts; use crate::trace; use crate::types::*; use crate::utils as client_utils; @@ -17,12 +17,12 @@ use futures::select; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use log::Level; use parking_lot::{Mutex, RwLock}; -use std::cmp; +use redis_protocol::resp3::types::Frame as Resp3Frame; use std::collections::{BTreeMap, BTreeSet, VecDeque}; -use std::mem; use std::ops::DerefMut; use std::sync::Arc; use std::time::{Duration, Instant}; +use std::{cmp, mem, str}; use tokio; use tokio::sync::broadcast::{channel as broadcast_channel, 
Receiver as BroadcastReceiver}; use tokio::sync::mpsc::UnboundedSender; @@ -188,7 +188,7 @@ pub async fn insert_locked_map_async(locked: &AsyncRwLock, command: &mut RedisCommand) -> bool { - if command.max_attempts_exceeded() { + if command.max_attempts_exceeded(inner) { _warn!( inner, "Exceeded max write attempts for command: {}", @@ -215,7 +215,11 @@ pub fn max_attempts_reached(inner: &Arc, command: &mut RedisCo } } -pub fn should_apply_backpressure(connections: &Connections, server: Option<&Arc>) -> Option { +pub fn should_apply_backpressure( + inner: &Arc, + connections: &Connections, + server: Option<&Arc>, +) -> Result, RedisError> { let in_flight = match connections { Connections::Centralized { ref counters, .. } => client_utils::read_atomic(&counters.in_flight), Connections::Clustered { ref counters, .. } => server @@ -227,14 +231,28 @@ pub fn should_apply_backpressure(connections: &Connections, server: Option<&Arc< }) .unwrap_or(0), }; - let min_backpressure_time_ms = globals().min_backpressure_time_ms(); - let backpressure_command_count = globals().backpressure_count(); + let min_backpressure_time_ms = inner.perf_config.min_sleep_duration(); + let backpressure_command_count = inner.perf_config.max_in_flight_commands(); + let disable_backpressure_scaling = inner.perf_config.disable_backpressure_scaling(); + + let amt = if in_flight > backpressure_command_count { + if inner.perf_config.disable_auto_backpressure() { + return Err(RedisError::new( + RedisErrorKind::Backpressure, + "Max number of in-flight commands reached.", + )); + } - if in_flight > backpressure_command_count { - Some(cmp::max(in_flight - backpressure_command_count, min_backpressure_time_ms) as u64) + if disable_backpressure_scaling { + Some(min_backpressure_time_ms as u64) + } else { + Some(cmp::max(min_backpressure_time_ms as u64, in_flight as u64)) + } } else { None - } + }; + + Ok(amt) } pub fn centralized_server_name(inner: &Arc) -> String { @@ -319,8 +337,8 @@ pub fn prepare_command( 
inner: &Arc, counters: &Counters, command: RedisCommand, -) -> Result<(SentCommand, Frame, bool), RedisError> { - let frame = command.to_frame()?; +) -> Result<(SentCommand, ProtocolFrame, bool), RedisError> { + let frame = command.to_frame(inner.is_resp3())?; let mut sent_command: SentCommand = command.into(); sent_command.command.incr_attempted(); sent_command.network_start = Some(Instant::now()); @@ -332,10 +350,13 @@ pub fn prepare_command( // * we've fed up to the global max feed count commands already // * the command closes the connection // * the command ends a transaction + // * the command does some form of authentication // * the command blocks the multiplexer command loop - let should_flush = counters.should_send() + let should_flush = counters.should_send(inner) || sent_command.command.is_quit() || sent_command.command.kind.ends_transaction() + || sent_command.command.kind.is_hello() + || sent_command.command.kind.is_auth() || client_utils::is_locked_some(&sent_command.command.resp_tx); Ok((sent_command, frame, should_flush)) @@ -396,6 +417,21 @@ pub async fn send_clustered_command( connection::write_command(inner, writer, counters, frame, should_flush).await } +fn respond_early_to_caller_error(inner: &Arc, mut command: RedisCommand, error: RedisError) { + _debug!(inner, "Responding early to caller with error {:?}", error); + + if let Some(tx) = command.tx.take() { + if let Err(e) = tx.send(Err(error)) { + _warn!(inner, "Error sending response to caller: {:?}", e); + } + } + + // check for a multiplexer response sender too + if let Some(tx) = command.resp_tx.write().take() { + let _ = tx.send(()); + } +} + pub async fn write_centralized_command( inner: &Arc, connections: &Connections, @@ -403,7 +439,15 @@ pub async fn write_centralized_command( no_backpressure: bool, ) -> Result { if !no_backpressure { - if let Some(backpressure) = should_apply_backpressure(connections, None) { + let backpressure = match should_apply_backpressure(inner, connections, 
None) { + Ok(backpressure) => backpressure, + Err(e) => { + respond_early_to_caller_error(inner, command, e); + return Ok(Backpressure::Skipped); + } + }; + + if let Some(backpressure) = backpressure { _warn!(inner, "Applying backpressure for {} ms", backpressure); return Ok(Backpressure::Wait((Duration::from_millis(backpressure), command))); } @@ -456,7 +500,7 @@ pub async fn write_clustered_command( { let hash_slot = match hash_slot { Some(slot) => Some(slot), - None => command.extract_key().map(|key| redis_keyslot(&key)), + None => command.extract_key().map(|key| redis_keyslot(key)), }; let server = match hash_slot { Some(hash_slot) => match cache.read().get_server(hash_slot) { @@ -482,7 +526,15 @@ pub async fn write_clustered_command( }; if !no_backpressure { - if let Some(backpressure) = should_apply_backpressure(connections, Some(&server)) { + let backpressure = match should_apply_backpressure(inner, connections, Some(&server)) { + Ok(backpressure) => backpressure, + Err(e) => { + respond_early_to_caller_error(inner, command, e); + return Ok(Backpressure::Skipped); + } + }; + + if let Some(backpressure) = backpressure { _warn!(inner, "Applying backpressure for {} ms", backpressure); return Ok(Backpressure::Wait((Duration::from_millis(backpressure), command))); } @@ -494,7 +546,7 @@ pub async fn write_clustered_command( "Using server {} with hash slot {:?} from key {}", server, hash_slot, - key + String::from_utf8_lossy(key) ); } } @@ -633,6 +685,7 @@ pub fn spawn_clustered_listener( RedisStream::Tls(stream) => Either::Left( stream .try_fold(memo, |(inner, server, counters, commands), frame| async { + let frame = frame.into_resp3(); responses::process_clustered_frame(&inner, &server, &counters, &commands, frame).await?; Ok((inner, server, counters, commands)) }) @@ -641,6 +694,7 @@ pub fn spawn_clustered_listener( RedisStream::Tcp(stream) => Either::Right( stream .try_fold(memo, |(inner, server, counters, commands), frame| async { + let frame = 
frame.into_resp3(); responses::process_clustered_frame(&inner, &server, &counters, &commands, frame).await?; Ok((inner, server, counters, commands)) }) @@ -804,6 +858,7 @@ pub fn spawn_centralized_listener( RedisStream::Tls(stream) => Either::Left( stream .try_fold(memo, |(inner, server, counters, commands), frame| async { + let frame = frame.into_resp3(); responses::process_centralized_frame(&inner, &server, &counters, &commands, frame).await?; Ok((inner, server, counters, commands)) }) @@ -812,6 +867,7 @@ pub fn spawn_centralized_listener( RedisStream::Tcp(stream) => Either::Right( stream .try_fold(memo, |(inner, server, counters, commands), frame| async { + let frame = frame.into_resp3(); responses::process_centralized_frame(&inner, &server, &counters, &commands, frame).await?; Ok((inner, server, counters, commands)) }) @@ -931,18 +987,18 @@ pub fn check_mget_cluster_keys(multiplexer: &Multiplexer, keys: &Vec let mut nodes = BTreeSet::new(); for key in keys.iter() { - let key_str = match key.as_str() { + let key_bytes = match key.as_bytes() { Some(s) => s, - None => return Err(RedisError::new(RedisErrorKind::InvalidArgument, "Expected key string.")), + None => return Err(RedisError::new(RedisErrorKind::InvalidArgument, "Expected key bytes.")), }; - let hash_slot = redis_protocol::redis_keyslot(&key_str); + let hash_slot = redis_protocol::redis_keyslot(key_bytes); let server = match cache.read().get_server(hash_slot) { Some(s) => s.id.clone(), None => { return Err(RedisError::new( RedisErrorKind::InvalidArgument, - format!("Failed to find cluster node for {}", key_str), - )) + "Failed to find cluster node", + )); } }; @@ -974,18 +1030,18 @@ pub fn check_mset_cluster_keys(multiplexer: &Multiplexer, args: &Vec let mut nodes = BTreeSet::new(); for chunk in args.chunks(2) { - let key = match chunk[0].as_str() { + let key = match chunk[0].as_bytes() { Some(s) => s, - None => return Err(RedisError::new(RedisErrorKind::InvalidArgument, "Expected key string.")), + None 
=> return Err(RedisError::new(RedisErrorKind::InvalidArgument, "Expected key bytes.")), }; - let hash_slot = redis_protocol::redis_keyslot(&key); + let hash_slot = redis_protocol::redis_keyslot(key); let server = match cache.read().get_server(hash_slot) { Some(s) => s.id.clone(), None => { return Err(RedisError::new( RedisErrorKind::InvalidArgument, - format!("Failed to find cluster node for {}", key), - )) + "Failed to find cluster node.", + )); } }; @@ -1107,15 +1163,24 @@ async fn existing_backchannel_connection(inner: &Arc, servers: } async fn cluster_nodes_backchannel(inner: &Arc) -> Result { - let mut servers: Vec> = inner - .config - .read() - .server - .hosts() - .iter() - .map(|(h, p)| Arc::new(format!("{}:{}", h, p))) - .collect(); + let mut servers = if let Some(ref state) = *inner.cluster_state.read() { + state.unique_main_nodes() + } else { + _debug!( + inner, + "Falling back to hosts from config in cluster backchannel due to missing cluster state." + ); + inner + .config + .read() + .server + .hosts() + .iter() + .map(|(h, p)| Arc::new(format!("{}:{}", h, p))) + .collect() + }; + _debug!(inner, "Creating or using backchannel from {:?}", servers); if let Some(swap) = existing_backchannel_connection(inner, &servers).await { servers.swap(0, swap); } @@ -1125,7 +1190,7 @@ async fn cluster_nodes_backchannel(inner: &Arc) -> Result frame, Err(e) => { _warn!(inner, "Error creating or using backchannel for cluster nodes: {:?}", e); @@ -1133,8 +1198,8 @@ async fn cluster_nodes_backchannel(inner: &Arc) -> Result state, Err(e) => { @@ -1158,6 +1223,78 @@ async fn cluster_nodes_backchannel(inner: &Arc) -> Result, changes: Vec) { + let mut to_remove = BTreeSet::new(); + + // check for closed senders as we emit messages, and drop them at the end + { + for (idx, tx) in inner.cluster_change_tx.read().iter().enumerate() { + if let Err(_) = tx.send(changes.clone()) { + to_remove.insert(idx); + } + } + } + + if !to_remove.is_empty() { + _trace!(inner, "Removing {} closed 
cluster change listeners", to_remove.len()); + let mut message_tx_guard = inner.cluster_change_tx.write(); + let message_tx_ref = &mut *message_tx_guard; + + let mut new_listeners = VecDeque::with_capacity(message_tx_ref.len() - to_remove.len()); + + for (idx, tx) in message_tx_ref.drain(..).enumerate() { + if !to_remove.contains(&idx) { + new_listeners.push_back(tx); + } + } + *message_tx_ref = new_listeners; + } +} + +fn broadcast_cluster_changes(inner: &Arc, changes: &ClusterChange) { + let has_listeners = { inner.cluster_change_tx.read().len() > 0 }; + + if has_listeners { + let (added, removed) = { + let mut added = Vec::with_capacity(changes.add.len()); + let mut removed = Vec::with_capacity(changes.remove.len()); + + for server in changes.add.iter() { + let parts = match server_to_parts(server) { + Ok((host, port)) => (host.to_owned(), port), + Err(_) => continue, + }; + + added.push(parts); + } + for server in changes.remove.iter() { + let parts = match server_to_parts(server) { + Ok((host, port)) => (host.to_owned(), port), + Err(_) => continue, + }; + + removed.push(parts); + } + + (added, removed) + }; + let mut changes = Vec::with_capacity(added.len() + removed.len() + 1); + if added.is_empty() && removed.is_empty() { + changes.push(ClusterStateChange::Rebalance); + } else { + for parts in added.into_iter() { + changes.push(ClusterStateChange::Add(parts)) + } + for parts in removed.into_iter() { + changes.push(ClusterStateChange::Remove(parts)); + } + } + + emit_cluster_changes(inner, changes); + } +} + pub async fn sync_cluster( inner: &Arc, connections: &Connections, @@ -1182,6 +1319,7 @@ pub async fn sync_cluster( }; let changes = create_cluster_change(&cluster_state, &writers).await; _debug!(inner, "Changing cluster connections: {:?}", changes); + broadcast_cluster_changes(inner, &changes); for removed_server in changes.remove.into_iter() { remove_server(inner, counters, writers, commands, connection_ids, &removed_server).await?; diff --git 
a/src/protocol/codec.rs b/src/protocol/codec.rs index 4b4171a2..a4c00d32 100644 --- a/src/protocol/codec.rs +++ b/src/protocol/codec.rs @@ -1,10 +1,15 @@ -use crate::error::RedisError; +use crate::error::{RedisError, RedisErrorKind}; use crate::modules::inner::RedisClientInner; +use crate::protocol::types::ProtocolFrame; use crate::protocol::utils as protocol_utils; use bytes::BytesMut; -use redis_protocol::resp2::decode::decode as resp2_decode; +use redis_protocol::resp2::decode::decode_mut as resp2_decode; use redis_protocol::resp2::encode::encode_bytes as resp2_encode; use redis_protocol::resp2::types::Frame as Resp2Frame; +use redis_protocol::resp3::decode::streaming::decode_mut as resp3_decode; +use redis_protocol::resp3::encode::complete::encode_bytes as resp3_encode; +use redis_protocol::resp3::types::RespVersion; +use redis_protocol::resp3::types::{Frame as Resp3Frame, StreamedFrame}; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; @@ -12,44 +17,18 @@ use tokio_util::codec::{Decoder, Encoder}; use crate::globals::globals; #[cfg(feature = "metrics")] use crate::modules::metrics::MovingStats; +use arc_swap::ArcSwap; #[cfg(feature = "metrics")] use parking_lot::RwLock; -#[cfg(feature = "network-logs")] -use std::str; #[cfg(not(feature = "network-logs"))] fn log_resp2_frame(_: &str, _: &Resp2Frame, _: bool) {} - -#[cfg(feature = "network-logs")] -#[derive(Debug)] -enum DebugFrame { - String(String), - Bytes(Vec), - Integer(i64), - Array(Vec), -} - +#[cfg(not(feature = "network-logs"))] +fn log_resp3_frame(_: &str, _: &Resp3Frame, _: bool) {} #[cfg(feature = "network-logs")] -impl<'a> From<&'a Resp2Frame> for DebugFrame { - fn from(f: &'a Resp2Frame) -> Self { - match f { - Resp2Frame::Error(s) | Resp2Frame::SimpleString(s) => DebugFrame::String(s.to_owned()), - Resp2Frame::Integer(i) => DebugFrame::Integer(*i), - Resp2Frame::BulkString(b) => match str::from_utf8(b) { - Ok(s) => DebugFrame::String(s.to_owned()), - Err(_) => 
DebugFrame::Bytes(b.to_vec()), - }, - Resp2Frame::Null => DebugFrame::String("nil".into()), - Resp2Frame::Array(frames) => DebugFrame::Array(frames.iter().map(|f| f.into()).collect()), - } - } -} - +pub use crate::protocol::debug::log_resp2_frame; #[cfg(feature = "network-logs")] -fn log_resp2_frame(name: &str, frame: &Resp2Frame, encode: bool) { - let prefix = if encode { "Encoded" } else { "Decoded" }; - trace!("{}: {} {:?}", name, prefix, DebugFrame::from(frame)) -} +pub use crate::protocol::debug::log_resp3_frame; #[cfg(feature = "metrics")] fn sample_stats(codec: &RedisCodec, decode: bool, value: i64) { @@ -70,7 +49,7 @@ fn resp2_encode_frame(codec: &RedisCodec, item: Resp2Frame, dst: &mut BytesMut) let len = res.saturating_sub(offset); trace!( - "{}: Encoded {} bytes to {}. Buffer len: {}", + "{}: Encoded {} bytes to {}. Buffer len: {} (RESP2)", codec.name, len, codec.server, @@ -83,26 +62,130 @@ fn resp2_encode_frame(codec: &RedisCodec, item: Resp2Frame, dst: &mut BytesMut) } fn resp2_decode_frame(codec: &RedisCodec, src: &mut BytesMut) -> Result, RedisError> { - trace!("{}: Recv {} bytes from {}.", codec.name, src.len(), codec.server); + trace!( + "{}: Recv {} bytes from {} (RESP2).", + codec.name, + src.len(), + codec.server + ); if src.is_empty() { return Ok(None); } - if let Some((frame, amt)) = resp2_decode(src)? { + if let Some((frame, amt, _)) = resp2_decode(src)? { trace!("{}: Parsed {} bytes from {}", codec.name, amt, codec.server); log_resp2_frame(&codec.name, &frame, false); sample_stats(&codec, true, amt as i64); - let _ = src.split_to(amt); - Ok(Some(protocol_utils::check_auth_error(frame))) + Ok(Some(protocol_utils::check_resp2_auth_error(frame))) + } else { + Ok(None) + } +} + +fn resp3_encode_frame(codec: &RedisCodec, item: Resp3Frame, dst: &mut BytesMut) -> Result<(), RedisError> { + let offset = dst.len(); + + let res = resp3_encode(dst, &item)?; + let len = res.saturating_sub(offset); + + trace!( + "{}: Encoded {} bytes to {}. 
Buffer len: {} (RESP3)", + codec.name, + len, + codec.server, + res + ); + log_resp3_frame(&codec.name, &item, true); + sample_stats(&codec, false, len as i64); + + Ok(()) +} + +fn resp3_decode_frame(codec: &mut RedisCodec, src: &mut BytesMut) -> Result, RedisError> { + trace!( + "{}: Recv {} bytes from {} (RESP3).", + codec.name, + src.len(), + codec.server + ); + if src.is_empty() { + return Ok(None); + } + + if let Some((frame, amt, _)) = resp3_decode(src)? { + sample_stats(&codec, true, amt as i64); + + if codec.streaming_state.is_some() && frame.is_streaming() { + return Err(RedisError::new( + RedisErrorKind::ProtocolError, + "Cannot start a stream while already inside a stream.", + )); + } + + let result = if let Some(ref mut streamed_frame) = codec.streaming_state { + // we started receiving streamed data earlier + let frame = frame.into_complete_frame()?; + streamed_frame.add_frame(frame); + + if streamed_frame.is_finished() { + let frame = streamed_frame.into_frame()?; + trace!("{}: Ending {:?} stream", codec.name, frame.kind()); + log_resp3_frame(&codec.name, &frame, false); + Some(frame) + } else { + trace!("{}: Continuing {:?} stream", codec.name, streamed_frame.kind); + None + } + } else { + // we're processing a complete frame or starting a new streamed frame + if frame.is_streaming() { + let frame = frame.into_streaming_frame()?; + trace!("{}: Starting {:?} stream", codec.name, frame.kind); + codec.streaming_state = Some(frame); + None + } else { + // we're not in the middle of a stream and we found a complete frame + let frame = frame.into_complete_frame()?; + log_resp3_frame(&codec.name, &frame, false); + Some(protocol_utils::check_resp3_auth_error(frame)) + } + }; + + if result.is_some() { + let _ = codec.streaming_state.take(); + } + Ok(result) } else { Ok(None) } } +/// Attempt to decode with RESP2, and if that fails try once with RESP3. +/// +/// This is useful when handling HELLO commands sent in the middle of a RESP2 command sequence. 
+fn resp2_decode_with_fallback( + codec: &mut RedisCodec, + src: &mut BytesMut, +) -> Result, RedisError> { + let resp2_result = resp2_decode_frame(codec, src).map(|f| f.map(|f| f.into())); + if resp2_result.is_err() { + let resp3_result = resp3_decode_frame(codec, src).map(|f| f.map(|f| f.into())); + if resp3_result.is_ok() { + resp3_result + } else { + resp2_result + } + } else { + resp2_result + } +} + pub struct RedisCodec { pub name: Arc, pub server: String, + pub version: Arc>, + pub streaming_state: Option, #[cfg(feature = "metrics")] pub req_size_stats: Arc>, #[cfg(feature = "metrics")] @@ -114,51 +197,82 @@ impl RedisCodec { RedisCodec { server, name: inner.id.clone(), + version: inner.resp_version.clone(), + streaming_state: None, #[cfg(feature = "metrics")] req_size_stats: inner.req_size_stats.clone(), #[cfg(feature = "metrics")] res_size_stats: inner.res_size_stats.clone(), } } + + pub fn is_resp3(&self) -> bool { + *self.version.as_ref().load().as_ref() == RespVersion::RESP3 + } } -impl Encoder for RedisCodec { +impl Encoder for RedisCodec { type Error = RedisError; #[cfg(not(feature = "blocking-encoding"))] - fn encode(&mut self, item: Resp2Frame, dst: &mut BytesMut) -> Result<(), Self::Error> { - resp2_encode_frame(&self, item, dst) + fn encode(&mut self, item: ProtocolFrame, dst: &mut BytesMut) -> Result<(), Self::Error> { + match item { + ProtocolFrame::Resp2(frame) => resp2_encode_frame(&self, frame, dst), + ProtocolFrame::Resp3(frame) => resp3_encode_frame(&self, frame, dst), + } } #[cfg(feature = "blocking-encoding")] - fn encode(&mut self, item: Resp2Frame, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: ProtocolFrame, dst: &mut BytesMut) -> Result<(), Self::Error> { let frame_size = protocol_utils::frame_size(&item); if frame_size >= globals().blocking_encode_threshold() { trace!("{}: Encoding in blocking task with size {}", self.name, frame_size); - tokio::task::block_in_place(|| resp2_encode_frame(&self, item, 
dst)) + + tokio::task::block_in_place(|| match item { + ProtocolFrame::Resp2(frame) => resp2_encode_frame(&self, frame, dst), + ProtocolFrame::Resp3(frame) => resp3_encode_frame(&self, frame, dst), + }) } else { - resp2_encode_frame(&self, item, dst) + match item { + ProtocolFrame::Resp2(frame) => resp2_encode_frame(&self, frame, dst), + ProtocolFrame::Resp3(frame) => resp3_encode_frame(&self, frame, dst), + } } } } impl Decoder for RedisCodec { - type Item = Resp2Frame; + type Item = ProtocolFrame; type Error = RedisError; #[cfg(not(feature = "blocking-encoding"))] fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - resp2_decode_frame(&self, src) + if self.is_resp3() { + resp3_decode_frame(self, src).map(|f| f.map(|f| f.into())) + } else { + resp2_decode_with_fallback(self, src) + } } #[cfg(feature = "blocking-encoding")] fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { if src.len() >= globals().blocking_encode_threshold() { trace!("{}: Decoding in blocking task with size {}", self.name, src.len()); - tokio::task::block_in_place(|| resp2_decode_frame(&self, src)) + + tokio::task::block_in_place(|| { + if self.is_resp3() { + resp3_decode_frame(self, src).map(|f| f.map(|f| f.into())) + } else { + resp2_decode_with_fallback(self, src) + } + }) } else { - resp2_decode_frame(&self, src) + if self.is_resp3() { + resp3_decode_frame(self, src).map(|f| f.map(|f| f.into())) + } else { + resp2_decode_with_fallback(self, src) + } } } } diff --git a/src/protocol/connection.rs b/src/protocol/connection.rs index ff3b88ef..f26b1540 100644 --- a/src/protocol/connection.rs +++ b/src/protocol/connection.rs @@ -2,16 +2,19 @@ use crate::error::{RedisError, RedisErrorKind}; use crate::modules::inner::RedisClientInner; use crate::multiplexer::Counters; use crate::protocol::codec::RedisCodec; +use crate::protocol::types::ProtocolFrame; use crate::protocol::types::{ClusterKeyCache, RedisCommand, RedisCommandKind}; use crate::protocol::utils as 
protocol_utils; -use crate::protocol::utils::pretty_error; +use crate::protocol::utils::{frame_into_string, pretty_error}; use crate::types::{ClientState, InfoKind, Resolve}; use crate::utils as client_utils; use futures::sink::SinkExt; use futures::stream::{SplitSink, SplitStream, StreamExt}; -use redis_protocol::resp2::types::Frame as ProtocolFrame; +use redis_protocol::resp2::types::Frame as Resp2Frame; +use redis_protocol::resp3::types::{Frame as Resp3Frame, RespVersion}; use semver::Version; use std::net::SocketAddr; +use std::str; use std::sync::Arc; use tokio::io::{AsyncRead, AsyncWrite}; use tokio::net::TcpStream; @@ -54,6 +57,27 @@ pub enum RedisTransport { Tcp(FramedTcp), } +pub fn null_frame(is_resp3: bool) -> ProtocolFrame { + if is_resp3 { + ProtocolFrame::Resp2(Resp2Frame::Null) + } else { + ProtocolFrame::Resp3(Resp3Frame::Null) + } +} + +pub fn is_ok(frame: &ProtocolFrame) -> bool { + match frame { + ProtocolFrame::Resp3(ref frame) => match frame { + Resp3Frame::SimpleString { ref data, .. 
} => data == OK, + _ => false, + }, + ProtocolFrame::Resp2(ref frame) => match frame { + Resp2Frame::SimpleString(ref data) => data == OK, + _ => false, + }, + } +} + pub fn split_transport(transport: RedisTransport) -> (RedisSink, RedisStream) { match transport { RedisTransport::Tcp(framed) => { @@ -70,17 +94,18 @@ pub fn split_transport(transport: RedisTransport) -> (RedisSink, RedisStream) { pub async fn request_response( mut transport: Framed, request: &RedisCommand, + is_resp3: bool, ) -> Result<(ProtocolFrame, Framed), RedisError> where T: AsyncRead + AsyncWrite + Unpin + 'static, { - let frame = request.to_frame()?; + let frame = request.to_frame(is_resp3)?; let _ = transport.send(frame).await?; let (response, transport) = transport.into_future().await; let response = match response { Some(result) => result?, - None => ProtocolFrame::Null, + None => null_frame(is_resp3), }; Ok((response, transport)) } @@ -88,11 +113,12 @@ where pub async fn request_response_safe( mut transport: Framed, request: &RedisCommand, + is_resp3: bool, ) -> Result<(ProtocolFrame, Framed), (RedisError, Framed)> where T: AsyncRead + AsyncWrite + Unpin + 'static, { - let frame = match request.to_frame() { + let frame = match request.to_frame(is_resp3) { Ok(frame) => frame, Err(e) => return Err((e, transport)), }; @@ -106,7 +132,7 @@ where Ok(frame) => frame, Err(e) => return Err((e, transport)), }, - None => ProtocolFrame::Null, + None => null_frame(is_resp3), }; Ok((response, transport)) @@ -115,17 +141,18 @@ where pub async fn transport_request_response( transport: RedisTransport, request: &RedisCommand, + is_resp3: bool, ) -> Result<(ProtocolFrame, RedisTransport), RedisError> { match transport { RedisTransport::Tcp(transport) => { - let (frame, transport) = match request_response_safe(transport, request).await { + let (frame, transport) = match request_response_safe(transport, request, is_resp3).await { Ok(result) => result, Err((e, _)) => return Err(e), }; Ok((frame, 
RedisTransport::Tcp(transport))) } RedisTransport::Tls(transport) => { - let (frame, transport) = match request_response_safe(transport, request).await { + let (frame, transport) = match request_response_safe(transport, request, is_resp3).await { Ok(result) => result, Err((e, _)) => return Err(e), }; @@ -139,6 +166,7 @@ pub async fn authenticate( name: &str, username: Option, password: Option, + is_resp3: bool, ) -> Result, RedisError> where T: AsyncRead + AsyncWrite + Unpin + 'static, @@ -152,19 +180,22 @@ where let command = RedisCommand::new(RedisCommandKind::Auth, args, None); debug!("{}: Authenticating Redis client...", name); - let (response, transport) = request_response(transport, &command).await?; - - if let ProtocolFrame::SimpleString(inner) = response { - if inner == OK { - transport - } else { - return Err(RedisError::new(RedisErrorKind::Auth, inner)); + let (response, transport) = request_response(transport, &command, is_resp3).await?; + + match frame_into_string(response.into_resp3()) { + Ok(inner) => { + if inner == OK { + transport + } else { + return Err(RedisError::new(RedisErrorKind::Auth, inner)); + } + } + Err(_) => { + return Err(RedisError::new( + RedisErrorKind::Auth, + "Invalid auth response. 
Expected string.", + )) } - } else { - return Err(RedisError::new( - RedisErrorKind::ProtocolError, - format!("Invalid auth response {:?}.", response), - )); } } else { transport @@ -172,23 +203,43 @@ where debug!("{}: Changing client name to {}", name, name); let command = RedisCommand::new(RedisCommandKind::ClientSetname, vec![name.into()], None); - let (response, transport) = request_response(transport, &command).await?; + let (response, transport) = request_response(transport, &command, is_resp3).await?; - if let ProtocolFrame::SimpleString(inner) = response { - if inner == OK { - debug!("{}: Successfully set Redis client name.", name); - Ok(transport) - } else { - Err(RedisError::new(RedisErrorKind::ProtocolError, inner)) - } + if is_ok(&response) { + debug!("{}: Successfully set Redis client name.", name); + Ok(transport) } else { + error!("{} Failed to set client name with error {:?}", name, response); Err(RedisError::new( RedisErrorKind::ProtocolError, - format!("Failed to set client name: {:?}.", response), + "Failed to set client name.", )) } } +pub async fn switch_protocols( + inner: &Arc, + transport: Framed, +) -> Result, RedisError> +where + T: AsyncRead + AsyncWrite + Unpin + 'static, +{ + // reset the protocol version to the one specified by the config when we create new connections + inner.reset_protocol_version(); + // this is only used when initializing connections, and if the caller has not specified RESP3 then we can skip this + if !inner.is_resp3() { + return Ok(transport); + } + + _debug!(inner, "Switching to RESP3 protocol with HELLO..."); + let cmd = RedisCommand::new(RedisCommandKind::Hello(RespVersion::RESP3), vec![], None); + let (response, transport) = request_response(transport, &cmd, true).await?; + let response = protocol_utils::frame_to_results(response.into_resp3())?; + + _debug!(inner, "Recv HELLO response {:?}", response); + Ok(transport) +} + pub async fn read_client_id( inner: &Arc, transport: Framed, @@ -197,16 +248,40 @@ 
where T: AsyncRead + AsyncWrite + Unpin + 'static, { let command = RedisCommand::new(RedisCommandKind::ClientID, vec![], None); - let (result, transport) = request_response_safe(transport, &command).await?; + let (result, transport) = request_response_safe(transport, &command, inner.is_resp3()).await?; _debug!(inner, "Read client ID: {:?}", result); - let id = match result { - ProtocolFrame::Integer(i) => Some(i), + let id = match result.into_resp3() { + Resp3Frame::Number { data, .. } => Some(data), _ => None, }; Ok((id, transport)) } +pub async fn select_database( + inner: &Arc, + transport: Framed, +) -> Result, RedisError> +where + T: AsyncRead + AsyncWrite + Unpin + 'static, +{ + let db = match inner.config.read().database.clone() { + Some(db) => db, + None => return Ok(transport), + }; + + _trace!(inner, "Selecting database {} after connecting.", db); + let command = RedisCommand::new(RedisCommandKind::Select, vec![db.into()], None); + let (result, transport) = request_response(transport, &command, inner.is_resp3()).await?; + let response = result.into_resp3(); + + if let Some(error) = protocol_utils::frame_to_error(&response) { + Err(error) + } else { + Ok(transport) + } +} + #[cfg(feature = "enable-tls")] pub async fn create_authenticated_connection_tls( addr: &SocketAddr, @@ -222,7 +297,9 @@ pub async fn create_authenticated_connection_tls( let socket = TcpStream::connect(addr).await?; let tls_stream = tls::create_tls_connector(&inner.config)?; let socket = tls_stream.connect(domain, socket).await?; - let framed = authenticate(Framed::new(socket, codec), &client_name, username, password).await?; + let framed = switch_protocols(inner, Framed::new(socket, codec)).await?; + let framed = authenticate(framed, &client_name, username, password, inner.is_resp3()).await?; + let framed = select_database(inner, framed).await?; client_utils::set_client_state(&inner.state, ClientState::Connected); Ok(framed) @@ -248,7 +325,9 @@ pub async fn 
create_authenticated_connection( let username = inner.config.read().username.clone(); let socket = TcpStream::connect(addr).await?; - let framed = authenticate(Framed::new(socket, codec), &client_name, username, password).await?; + let framed = switch_protocols(inner, Framed::new(socket, codec)).await?; + let framed = authenticate(framed, &client_name, username, password, inner.is_resp3()).await?; + let framed = select_database(inner, framed).await?; client_utils::set_client_state(&inner.state, ClientState::Connected); Ok(framed) @@ -302,8 +381,8 @@ async fn read_cluster_state( } }; - match request_response(connection, &command).await { - Ok((frame, _)) => frame, + match request_response(connection, &command, inner.is_resp3()).await { + Ok((frame, _)) => frame.into_resp3(), Err(e) => { _trace!(inner, "Failed to read cluster state from {}:{} => {:?}", host, port, e); return None; @@ -318,8 +397,8 @@ async fn read_cluster_state( } }; - match request_response(connection, &command).await { - Ok((frame, _)) => frame, + match request_response(connection, &command, inner.is_resp3()).await { + Ok((frame, _)) => frame.into_resp3(), Err(e) => { _trace!(inner, "Failed to read cluster state from {}:{} => {:?}", host, port, e); return None; @@ -337,7 +416,7 @@ async fn read_cluster_state( ); return None; } - let cluster_state = match response.to_string() { + let cluster_state = match response.as_str() { Some(response) => response, None => return None, }; @@ -359,10 +438,14 @@ where T: AsyncRead + AsyncWrite + Unpin + 'static, { let command = RedisCommand::new(RedisCommandKind::Info, vec![InfoKind::Server.to_str().into()], None); - let (result, transport) = request_response(transport, &command).await?; - let result = match result { - ProtocolFrame::BulkString(bytes) => String::from_utf8(bytes)?, - ProtocolFrame::Error(e) => return Err(pretty_error(&e)), + let (result, transport) = request_response(transport, &command, inner.is_resp3()).await?; + let result = match 
result.into_resp3() { + Resp3Frame::BlobString { data, .. } => String::from_utf8(data.to_vec())?, + Resp3Frame::SimpleError { data, .. } => return Err(pretty_error(&data)), + Resp3Frame::BlobError { data, .. } => { + let parsed = String::from_utf8_lossy(&data); + return Err(pretty_error(&parsed)); + } _ => { return Err(RedisError::new( RedisErrorKind::ProtocolError, diff --git a/src/protocol/debug.rs b/src/protocol/debug.rs new file mode 100644 index 00000000..30c5e87f --- /dev/null +++ b/src/protocol/debug.rs @@ -0,0 +1,114 @@ +use redis_protocol::resp2::types::Frame as Resp2Frame; +use redis_protocol::resp3::types::Auth; +use redis_protocol::resp3::types::Frame as Resp3Frame; +use std::collections::{HashMap, HashSet}; +use std::hash::{Hash, Hasher}; +use std::str; + +#[derive(Debug)] +enum DebugFrame { + String(String), + Bytes(Vec), + Integer(i64), + Double(f64), + Array(Vec), + // TODO add support for maps in network logs + #[allow(dead_code)] + Map(HashMap), + #[allow(dead_code)] + Set(HashSet), +} + +impl Hash for DebugFrame { + fn hash(&self, state: &mut H) { + match self { + DebugFrame::String(ref s) => { + 's'.hash(state); + s.hash(state) + } + DebugFrame::Bytes(ref b) => { + 'b'.hash(state); + b.hash(state) + } + DebugFrame::Integer(ref i) => { + 'i'.hash(state); + i.hash(state) + } + DebugFrame::Double(ref f) => { + 'd'.hash(state); + f.to_be_bytes().hash(state) + } + _ => panic!("Cannot hash network log debug frame {:?}", self), + } + } +} + +fn bytes_or_string(b: &[u8]) -> DebugFrame { + match str::from_utf8(b) { + Ok(s) => DebugFrame::String(s.to_owned()), + Err(_) => DebugFrame::Bytes(b.to_vec()), + } +} + +impl<'a> From<&'a Resp2Frame> for DebugFrame { + fn from(f: &'a Resp2Frame) -> Self { + match f { + Resp2Frame::Error(s) => DebugFrame::String(s.to_string()), + Resp2Frame::SimpleString(s) => bytes_or_string(s), + Resp2Frame::Integer(i) => DebugFrame::Integer(*i), + Resp2Frame::BulkString(b) => bytes_or_string(b), + Resp2Frame::Null => 
DebugFrame::String("nil".into()), + Resp2Frame::Array(frames) => DebugFrame::Array(frames.iter().map(|f| f.into()).collect()), + } + } +} + +impl<'a> From<&'a Resp3Frame> for DebugFrame { + fn from(frame: &'a Resp3Frame) -> Self { + match frame { + Resp3Frame::Map { ref data, .. } => DebugFrame::Array(data.iter().fold(vec![], |mut memo, (key, value)| { + memo.push(key.into()); + memo.push(value.into()); + memo + })), + Resp3Frame::Set { ref data, .. } => DebugFrame::Array(data.iter().map(|d| d.into()).collect()), + Resp3Frame::Array { ref data, .. } => DebugFrame::Array(data.iter().map(|d| d.into()).collect()), + Resp3Frame::BlobError { ref data, .. } => bytes_or_string(data), + Resp3Frame::BlobString { ref data, .. } => bytes_or_string(data), + Resp3Frame::SimpleString { ref data, .. } => bytes_or_string(data), + Resp3Frame::SimpleError { ref data, .. } => DebugFrame::String(data.to_string()), + Resp3Frame::Double { ref data, .. } => DebugFrame::Double(*data), + Resp3Frame::BigNumber { ref data, .. } => bytes_or_string(data), + Resp3Frame::Number { ref data, .. } => DebugFrame::Integer(*data), + Resp3Frame::Boolean { ref data, .. } => DebugFrame::String(data.to_string()), + Resp3Frame::Null => DebugFrame::String("nil".into()), + Resp3Frame::Push { ref data, .. } => DebugFrame::Array(data.iter().map(|d| d.into()).collect()), + Resp3Frame::ChunkedString(ref data) => bytes_or_string(data), + Resp3Frame::VerbatimString { ref data, .. } => bytes_or_string(data), + Resp3Frame::Hello { + ref version, ref auth, .. 
+ } => { + let mut values = vec![DebugFrame::Integer(version.to_byte() as i64)]; + if let Some(Auth { + ref username, + ref password, + }) = auth + { + values.push(DebugFrame::String(username.to_string())); + values.push(DebugFrame::String(password.to_string())); + } + DebugFrame::Array(values) + } + } + } +} + +pub fn log_resp2_frame(name: &str, frame: &Resp2Frame, encode: bool) { + let prefix = if encode { "Encoded" } else { "Decoded" }; + trace!("{}: {} {:?}", name, prefix, DebugFrame::from(frame)) +} + +pub fn log_resp3_frame(name: &str, frame: &Resp3Frame, encode: bool) { + let prefix = if encode { "Encoded" } else { "Decoded" }; + trace!("{}: {} {:?}", name, prefix, DebugFrame::from(frame)) +} diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs index 0749bbd0..b260c186 100644 --- a/src/protocol/mod.rs +++ b/src/protocol/mod.rs @@ -1,5 +1,7 @@ pub mod codec; pub mod connection; +#[cfg(feature = "network-logs")] +pub mod debug; pub mod tls; pub mod types; pub mod utils; diff --git a/src/protocol/types.rs b/src/protocol/types.rs index ebe5d146..31d5b6f8 100644 --- a/src/protocol/types.rs +++ b/src/protocol/types.rs @@ -1,13 +1,19 @@ use super::utils as protocol_utils; -use crate::client::RedisClient; +use crate::clients::RedisClient; use crate::error::{RedisError, RedisErrorKind}; -use crate::globals::globals; +use crate::modules::inner::RedisClientInner; use crate::types::*; use crate::utils; use crate::utils::{set_locked, take_locked}; +use bytes_utils::Str; use parking_lot::RwLock; +use rand::Rng; +use redis_protocol::resp2::types::Frame as Resp2Frame; +use redis_protocol::resp2_frame_to_resp3; +use redis_protocol::resp3::types::Frame as Resp3Frame; pub use redis_protocol::{redis_keyslot, resp2::types::NULL, types::CRLF}; -use std::collections::{BTreeSet, VecDeque}; +use std::collections::{BTreeMap, BTreeSet, HashMap, VecDeque}; +use std::convert::TryInto; use std::fmt; use std::net::{SocketAddr, ToSocketAddrs}; use std::sync::Arc; @@ -15,19 +21,51 @@ use 
std::time::Instant; use tokio::sync::mpsc::UnboundedSender; use tokio::sync::oneshot::Sender as OneshotSender; +#[cfg(feature = "blocking-encoding")] +use crate::globals::globals; + #[cfg(not(feature = "full-tracing"))] use crate::trace::disabled::Span as FakeSpan; #[cfg(any(feature = "full-tracing", feature = "partial-tracing"))] use crate::trace::CommandTraces; #[cfg(any(feature = "full-tracing", feature = "partial-tracing"))] use crate::trace::Span; -use rand::Rng; -use std::borrow::Cow; pub const REDIS_CLUSTER_SLOTS: u16 = 16384; +#[derive(Debug)] +pub enum ProtocolFrame { + Resp2(Resp2Frame), + Resp3(Resp3Frame), +} + +impl ProtocolFrame { + pub fn into_resp3(self) -> Resp3Frame { + // since the `RedisValue::convert` logic already accounts for different encodings of maps and sets we can just + // change everything to RESP3 above the protocol layer. resp2->resp3 is lossless so this is safe. + match self { + ProtocolFrame::Resp2(frame) => resp2_frame_to_resp3(frame), + ProtocolFrame::Resp3(frame) => frame, + } + } +} + +impl From for ProtocolFrame { + fn from(frame: Resp2Frame) -> Self { + ProtocolFrame::Resp2(frame) + } +} + +impl From for ProtocolFrame { + fn from(frame: Resp3Frame) -> Self { + ProtocolFrame::Resp3(frame) + } +} + #[derive(Clone)] pub struct AllNodesResponse { + // this state can shared across tasks scheduled in different threads on multi-thread runtimes when we + // send commands to all servers at once and wait for all the responses num_nodes: Arc>, resp_tx: Arc>>>>, } @@ -82,6 +120,7 @@ pub struct CustomKeySlot { #[derive(Clone)] pub struct SplitCommand { + // TODO change to mutex pub tx: Arc, RedisError>>>>>, pub config: Option, } @@ -102,8 +141,8 @@ impl Eq for SplitCommand {} #[derive(Clone)] pub enum ResponseKind { - Blocking { tx: Option> }, - Multiple { count: usize, buffer: VecDeque }, + Blocking { tx: Option> }, + Multiple { count: usize, buffer: VecDeque }, } impl fmt::Debug for ResponseKind { @@ -131,7 +170,7 @@ impl Eq for 
ResponseKind {} pub struct KeyScanInner { pub key_slot: Option, - pub cursor: String, + pub cursor: Str, pub tx: UnboundedSender>, } @@ -150,7 +189,7 @@ pub enum ValueScanResult { } pub struct ValueScanInner { - pub cursor: String, + pub cursor: Str, pub tx: UnboundedSender>, } @@ -174,11 +213,11 @@ impl ValueScanInner { )); } - let mut out = utils::new_map(data.len() / 2); + let mut out = HashMap::with_capacity(data.len() / 2); while data.len() >= 2 { let value = data.pop().unwrap(); - let key = match data.pop().unwrap() { - RedisValue::String(s) => s, + let key: RedisKey = match data.pop().unwrap() { + RedisValue::String(s) => s.into(), _ => { return Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -190,7 +229,7 @@ impl ValueScanInner { out.insert(key, value); } - Ok(out.into()) + Ok(out.try_into()?) } pub fn transform_zscan_result(mut data: Vec) -> Result, RedisError> { @@ -321,6 +360,7 @@ pub enum RedisCommandKind { GetRange, GetSet, HDel, + Hello(RespVersion), HExists, HGet, HGetAll, @@ -432,6 +472,26 @@ pub enum RedisCommandKind { Unwatch, Wait, Watch, + XinfoConsumers, + XinfoGroups, + XinfoStream, + Xadd, + Xtrim, + Xdel, + Xrange, + Xrevrange, + Xlen, + Xread((bool, Option)), + Xgroupcreate, + XgroupCreateConsumer, + XgroupDelConsumer, + XgroupDestroy, + XgroupSetId, + Xreadgroup((bool, Option)), + Xack, + Xclaim, + Xautoclaim, + Xpending, Zadd, Zcard, Zcount, @@ -473,6 +533,7 @@ pub enum RedisCommandKind { _Close, _Split(SplitCommand), _AuthAllCluster(AllNodesResponse), + _HelloAllCluster((AllNodesResponse, RespVersion)), _FlushAllCluster(AllNodesResponse), _ScriptFlushCluster(AllNodesResponse), _ScriptLoadCluster(AllNodesResponse), @@ -515,6 +576,20 @@ impl RedisCommandKind { } } + pub fn is_hello(&self) -> bool { + match *self { + RedisCommandKind::Hello(_) | RedisCommandKind::_HelloAllCluster(_) => true, + _ => false, + } + } + + pub fn is_auth(&self) -> bool { + match *self { + RedisCommandKind::Auth => true, + _ => false, + } + } + pub fn 
is_value_scan(&self) -> bool { match *self { RedisCommandKind::Zscan(_) | RedisCommandKind::Hscan(_) | RedisCommandKind::Sscan(_) => true, @@ -621,7 +696,7 @@ impl RedisCommandKind { } /// Read the command's protocol string without panicking. - pub fn to_str_debug(&self) -> &'static str { + pub fn to_str_debug(&self) -> &str { match *self { RedisCommandKind::AclLoad => "ACL LOAD", RedisCommandKind::AclSave => "ACL SAVE", @@ -716,6 +791,7 @@ impl RedisCommandKind { RedisCommandKind::GetRange => "GETRANGE", RedisCommandKind::GetSet => "GETSET", RedisCommandKind::HDel => "HDEL", + RedisCommandKind::Hello(_) => "HELLO", RedisCommandKind::HExists => "HEXISTS", RedisCommandKind::HGet => "HGET", RedisCommandKind::HGetAll => "HGETALL", @@ -827,6 +903,26 @@ impl RedisCommandKind { RedisCommandKind::Unwatch => "UNWATCH", RedisCommandKind::Wait => "WAIT", RedisCommandKind::Watch => "WATCH", + RedisCommandKind::XinfoConsumers => "XINFO CONSUMERS", + RedisCommandKind::XinfoGroups => "XINFO GROUPS", + RedisCommandKind::XinfoStream => "XINFO STREAM", + RedisCommandKind::Xadd => "XADD", + RedisCommandKind::Xtrim => "XTRIM", + RedisCommandKind::Xdel => "XDEL", + RedisCommandKind::Xrange => "XRANGE", + RedisCommandKind::Xrevrange => "XREVRANGE", + RedisCommandKind::Xlen => "XLEN", + RedisCommandKind::Xread(_) => "XREAD", + RedisCommandKind::Xgroupcreate => "XGROUP CREATE", + RedisCommandKind::XgroupCreateConsumer => "XGROUP CREATECONSUMER", + RedisCommandKind::XgroupDelConsumer => "XGROUP DELCONSUMER", + RedisCommandKind::XgroupDestroy => "XGROUP DESTROY", + RedisCommandKind::XgroupSetId => "XGROUP SETID", + RedisCommandKind::Xreadgroup(_) => "XREADGROUP", + RedisCommandKind::Xack => "XACK", + RedisCommandKind::Xclaim => "XCLAIM", + RedisCommandKind::Xautoclaim => "XAUTOCLAIM", + RedisCommandKind::Xpending => "XPENDING", RedisCommandKind::Zadd => "ZADD", RedisCommandKind::Zcard => "ZCARD", RedisCommandKind::Zcount => "ZCOUNT", @@ -868,17 +964,18 @@ impl RedisCommandKind { 
RedisCommandKind::_Close => "CLOSE", RedisCommandKind::_Split(_) => "SPLIT", RedisCommandKind::_AuthAllCluster(_) => "AUTH ALL CLUSTER", + RedisCommandKind::_HelloAllCluster(_) => "HELLO ALL CLUSTER", RedisCommandKind::_FlushAllCluster(_) => "FLUSHALL CLUSTER", RedisCommandKind::_ScriptFlushCluster(_) => "SCRIPT FLUSH CLUSTER", RedisCommandKind::_ScriptLoadCluster(_) => "SCRIPT LOAD CLUSTER", RedisCommandKind::_ScriptKillCluster(_) => "SCRIPT Kill CLUSTER", - RedisCommandKind::_Custom(ref kind) => kind.cmd, + RedisCommandKind::_Custom(ref kind) => &kind.cmd, } } /// Read the protocol string for a command, panicking for internal commands that don't map directly to redis command. - pub(crate) fn cmd_str(&self) -> &'static str { - match *self { + pub(crate) fn cmd_str(&self) -> Str { + let s = match *self { RedisCommandKind::AclLoad => "ACL", RedisCommandKind::AclSave => "ACL", RedisCommandKind::AclList => "ACL", @@ -973,6 +1070,7 @@ impl RedisCommandKind { RedisCommandKind::GetRange => "GETRANGE", RedisCommandKind::GetSet => "GETSET", RedisCommandKind::HDel => "HDEL", + RedisCommandKind::Hello(_) => "HELLO", RedisCommandKind::HExists => "HEXISTS", RedisCommandKind::HGet => "HGET", RedisCommandKind::HGetAll => "HGETALL", @@ -1084,6 +1182,26 @@ impl RedisCommandKind { RedisCommandKind::Unwatch => "UNWATCH", RedisCommandKind::Wait => "WAIT", RedisCommandKind::Watch => "WATCH", + RedisCommandKind::XinfoConsumers => "XINFO", + RedisCommandKind::XinfoGroups => "XINFO", + RedisCommandKind::XinfoStream => "XINFO", + RedisCommandKind::Xadd => "XADD", + RedisCommandKind::Xtrim => "XTRIM", + RedisCommandKind::Xdel => "XDEL", + RedisCommandKind::Xrange => "XRANGE", + RedisCommandKind::Xrevrange => "XREVRANGE", + RedisCommandKind::Xlen => "XLEN", + RedisCommandKind::Xread(_) => "XREAD", + RedisCommandKind::Xgroupcreate => "XGROUP", + RedisCommandKind::XgroupCreateConsumer => "XGROUP", + RedisCommandKind::XgroupDelConsumer => "XGROUP", + RedisCommandKind::XgroupDestroy => 
"XGROUP", + RedisCommandKind::XgroupSetId => "XGROUP", + RedisCommandKind::Xreadgroup(_) => "XREADGROUP", + RedisCommandKind::Xack => "XACK", + RedisCommandKind::Xclaim => "XCLAIM", + RedisCommandKind::Xautoclaim => "XAUTOCLAIM", + RedisCommandKind::Xpending => "XPENDING", RedisCommandKind::Zadd => "ZADD", RedisCommandKind::Zcard => "ZCARD", RedisCommandKind::Zcount => "ZCOUNT", @@ -1126,11 +1244,14 @@ impl RedisCommandKind { RedisCommandKind::Hscan(_) => "HSCAN", RedisCommandKind::Zscan(_) => "ZSCAN", RedisCommandKind::_AuthAllCluster(_) => "AUTH", - RedisCommandKind::_Custom(ref kind) => kind.cmd, + RedisCommandKind::_HelloAllCluster(_) => "HELLO", + RedisCommandKind::_Custom(ref kind) => return kind.cmd.clone(), RedisCommandKind::_Close | RedisCommandKind::_Split(_) => { panic!("unreachable (redis command)") } - } + }; + + utils::static_str(s) } /// Read the optional subcommand string for a command. @@ -1198,6 +1319,14 @@ impl RedisCommandKind { RedisCommandKind::MemoryMallocStats => "MALLOC-STATS", RedisCommandKind::MemoryStats => "STATS", RedisCommandKind::MemoryPurge => "PURGE", + RedisCommandKind::XinfoConsumers => "CONSUMERS", + RedisCommandKind::XinfoGroups => "GROUPS", + RedisCommandKind::XinfoStream => "STREAM", + RedisCommandKind::Xgroupcreate => "CREATE", + RedisCommandKind::XgroupCreateConsumer => "CREATECONSUMER", + RedisCommandKind::XgroupDelConsumer => "DELCONSUMER", + RedisCommandKind::XgroupDestroy => "DESTROY", + RedisCommandKind::XgroupSetId => "SETID", _ => return None, }; @@ -1302,6 +1431,32 @@ impl RedisCommandKind { } } + pub fn is_stream_command(&self) -> bool { + match *self { + RedisCommandKind::XinfoConsumers + | RedisCommandKind::XinfoGroups + | RedisCommandKind::XinfoStream + | RedisCommandKind::Xadd + | RedisCommandKind::Xtrim + | RedisCommandKind::Xdel + | RedisCommandKind::Xrange + | RedisCommandKind::Xrevrange + | RedisCommandKind::Xlen + | RedisCommandKind::Xread(_) + | RedisCommandKind::Xgroupcreate + | 
RedisCommandKind::XgroupCreateConsumer + | RedisCommandKind::XgroupDelConsumer + | RedisCommandKind::XgroupDestroy + | RedisCommandKind::XgroupSetId + | RedisCommandKind::Xreadgroup(_) + | RedisCommandKind::Xack + | RedisCommandKind::Xclaim + | RedisCommandKind::Xautoclaim + | RedisCommandKind::Xpending => true, + _ => false, + } + } + pub fn is_blocking(&self) -> bool { match *self { RedisCommandKind::BlPop @@ -1311,6 +1466,8 @@ impl RedisCommandKind { | RedisCommandKind::BzPopMin | RedisCommandKind::BzPopMax | RedisCommandKind::Wait => true, + RedisCommandKind::Xread((ref blocking, _)) => *blocking, + RedisCommandKind::Xreadgroup((ref blocking, _)) => *blocking, RedisCommandKind::_Custom(ref kind) => kind.is_blocking, _ => false, } @@ -1322,6 +1479,8 @@ impl RedisCommandKind { RedisCommandKind::_Custom(ref kind) => kind.hash_slot.clone(), RedisCommandKind::EvalSha(ref slot) => slot.key_slot.clone(), RedisCommandKind::Eval(ref slot) => slot.key_slot.clone(), + RedisCommandKind::Xread((_, ref slot)) => slot.clone(), + RedisCommandKind::Xreadgroup((_, ref slot)) => slot.clone(), _ => None, } } @@ -1332,6 +1491,7 @@ impl RedisCommandKind { | RedisCommandKind::_AuthAllCluster(_) | RedisCommandKind::_ScriptFlushCluster(_) | RedisCommandKind::_ScriptKillCluster(_) + | RedisCommandKind::_HelloAllCluster(_) | RedisCommandKind::_ScriptLoadCluster(_) => true, _ => false, } @@ -1346,6 +1506,7 @@ impl RedisCommandKind { pub fn all_nodes_response(&self) -> Option<&AllNodesResponse> { match *self { + RedisCommandKind::_HelloAllCluster((ref inner, _)) => Some(inner), RedisCommandKind::_AuthAllCluster(ref inner) => Some(inner), RedisCommandKind::_FlushAllCluster(ref inner) => Some(inner), RedisCommandKind::_ScriptFlushCluster(ref inner) => Some(inner), @@ -1357,6 +1518,9 @@ impl RedisCommandKind { pub fn clone_all_nodes(&self) -> Option { match *self { + RedisCommandKind::_HelloAllCluster((ref inner, ref version)) => { + Some(RedisCommandKind::_HelloAllCluster((inner.clone(), 
version.clone()))) + } RedisCommandKind::_AuthAllCluster(ref inner) => Some(RedisCommandKind::_AuthAllCluster(inner.clone())), RedisCommandKind::_FlushAllCluster(ref inner) => Some(RedisCommandKind::_FlushAllCluster(inner.clone())), RedisCommandKind::_ScriptFlushCluster(ref inner) => Some(RedisCommandKind::_ScriptFlushCluster(inner.clone())), @@ -1375,7 +1539,7 @@ impl RedisCommandKind { } /// Alias for a sender to notify the caller that a response was received. -pub type ResponseSender = Option>>; +pub type ResponseSender = Option>>; /// An arbitrary Redis command. pub struct RedisCommand { @@ -1478,26 +1642,26 @@ impl RedisCommand { self.attempted += 1; } - pub fn max_attempts_exceeded(&self) -> bool { - self.attempted >= globals().max_command_attempts() + pub fn max_attempts_exceeded(&self, inner: &Arc) -> bool { + self.attempted >= inner.perf_config.max_command_attempts() } /// Convert to a single frame with an array of bulk strings (or null). #[cfg(not(feature = "blocking-encoding"))] - pub fn to_frame(&self) -> Result { - protocol_utils::command_to_frame(self) + pub fn to_frame(&self, is_resp3: bool) -> Result { + protocol_utils::command_to_frame(self, is_resp3) } /// Convert to a single frame with an array of bulk strings (or null), using a blocking task. #[cfg(feature = "blocking-encoding")] - pub fn to_frame(&self) -> Result { + pub fn to_frame(&self, is_resp3: bool) -> Result { let cmd_size = protocol_utils::args_size(&self.args); if cmd_size >= globals().blocking_encode_threshold() { trace!("Using blocking task to convert command to frame with size {}", cmd_size); - tokio::task::block_in_place(|| protocol_utils::command_to_frame(self)) + tokio::task::block_in_place(|| protocol_utils::command_to_frame(self, is_resp3)) } else { - protocol_utils::command_to_frame(self) + protocol_utils::command_to_frame(self, is_resp3) } } @@ -1519,18 +1683,23 @@ impl RedisCommand { } /// Read the first key in the command, if any. 
- pub fn extract_key(&self) -> Option> { - if self.no_cluster() { + pub fn extract_key(&self) -> Option<&[u8]> { + let has_custom_key_location = match self.kind { + RedisCommandKind::Xread(_) => true, + RedisCommandKind::Xreadgroup(_) => true, + _ => false, + }; + if self.no_cluster() || has_custom_key_location { return None; } match self.args.first() { - Some(RedisValue::String(ref s)) => Some(Cow::Borrowed(s)), - Some(RedisValue::Bytes(ref b)) => Some(String::from_utf8_lossy(b)), + Some(RedisValue::String(ref s)) => Some(s.as_bytes()), + Some(RedisValue::Bytes(ref b)) => Some(b), Some(_) => match self.args.get(1) { // some commands take a `num_keys` argument first, followed by keys - Some(RedisValue::String(ref s)) => Some(Cow::Borrowed(s)), - Some(RedisValue::Bytes(ref b)) => Some(String::from_utf8_lossy(b)), + Some(RedisValue::String(ref s)) => Some(s.as_bytes()), + Some(RedisValue::Bytes(ref b)) => Some(b), _ => None, }, None => None, @@ -1598,6 +1767,7 @@ pub struct SlotRange { /// The cached view of the cluster used by the client to route commands to the correct cluster nodes. #[derive(Debug, Clone)] pub struct ClusterKeyCache { + // TODO use arcswap here data: Vec>, } @@ -1609,7 +1779,7 @@ impl From>> for ClusterKeyCache { impl ClusterKeyCache { /// Create a new cache from the output of CLUSTER NODES, if available. - pub fn new(status: Option) -> Result { + pub fn new(status: Option<&str>) -> Result { let mut cache = ClusterKeyCache { data: Vec::new() }; if let Some(status) = status { @@ -1619,6 +1789,17 @@ impl ClusterKeyCache { Ok(cache) } + /// Read a set of unique hash slots that each map to a primary/main node in the cluster. + pub fn unique_hash_slots(&self) -> Vec { + let mut out = BTreeMap::new(); + + for slot in self.data.iter() { + out.insert(&slot.server, slot.start); + } + + out.into_iter().map(|(_, v)| v).collect() + } + /// Read the set of unique primary/main nodes in the cluster. 
pub fn unique_main_nodes(&self) -> Vec> { let mut out = BTreeSet::new(); @@ -1636,7 +1817,7 @@ impl ClusterKeyCache { } /// Rebuild the cache in place with the output of a CLUSTER NODES command. - pub fn rebuild(&mut self, status: String) -> Result<(), RedisError> { + pub fn rebuild(&mut self, status: &str) -> Result<(), RedisError> { if status.trim().is_empty() { error!("Invalid empty CLUSTER NODES response."); return Err(RedisError::new( @@ -1660,7 +1841,7 @@ impl ClusterKeyCache { } /// Calculate the cluster hash slot for the provided key. - pub fn hash_key(key: &str) -> u16 { + pub fn hash_key(key: &[u8]) -> u16 { redis_protocol::redis_keyslot(key) } diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index fceb9933..41a94684 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,16 +1,24 @@ use crate::error::{RedisError, RedisErrorKind}; use crate::modules::inner::RedisClientInner; use crate::protocol::connection::OK; +use crate::protocol::types::ProtocolFrame; use crate::protocol::types::*; use crate::types::Resolve; use crate::types::*; use crate::types::{RedisConfig, ServerConfig, QUEUED}; use crate::utils; +use crate::utils::redis_string_to_f64; +use bytes::Bytes; +use bytes_utils::Str; use parking_lot::RwLock; -use redis_protocol::resp2::types::{Frame as ProtocolFrame, FrameKind as ProtocolFrameKind}; +use redis_protocol::resp2::types::Frame as Resp2Frame; +use redis_protocol::resp3::types::{Auth, PUBSUB_PUSH_PREFIX}; +use redis_protocol::resp3::types::{Frame as Resp3Frame, FrameMap}; use std::borrow::Cow; use std::collections::HashMap; +use std::convert::TryInto; use std::net::SocketAddr; +use std::ops::Deref; use std::str; use std::sync::Arc; @@ -25,6 +33,14 @@ pub fn uses_tls(inner: &Arc) -> bool { inner.config.read().tls.is_some() } +/// Whether the provided frame is null. 
+pub fn is_null(frame: &Resp3Frame) -> bool { + match frame { + Resp3Frame::Null => true, + _ => false, + } +} + #[cfg(not(feature = "enable-tls"))] pub fn uses_tls(_: &Arc) -> bool { false @@ -131,7 +147,7 @@ pub fn binary_search(slots: &Vec>, slot: u16) -> Option Result, Vec>, RedisError> { +pub fn parse_cluster_nodes(status: &str) -> Result, Vec>, RedisError> { let mut out: HashMap, Vec> = HashMap::new(); // build out the slot ranges for the primary nodes @@ -200,7 +216,7 @@ pub fn pretty_error(resp: &str) -> RedisError { "" => RedisErrorKind::Unknown, "ERR" => RedisErrorKind::Unknown, "WRONGTYPE" => RedisErrorKind::InvalidArgument, - "NOAUTH" => RedisErrorKind::Auth, + "NOAUTH" | "WRONGPASS" => RedisErrorKind::Auth, "MOVED" | "ASK" => RedisErrorKind::Cluster, "Invalid" => match parts.next().unwrap_or("").as_ref() { "argument(s)" | "Argument" => RedisErrorKind::InvalidArgument, @@ -219,9 +235,30 @@ pub fn pretty_error(resp: &str) -> RedisError { RedisError::new(kind, details) } -pub fn frame_to_pubsub(frame: ProtocolFrame) -> Result<(String, RedisValue), RedisError> { +/// Parse the frame as a string, without support for error frames. +pub fn frame_into_string(frame: Resp3Frame) -> Result { + match frame { + Resp3Frame::SimpleString { data, .. } => Ok(String::from_utf8(data.to_vec())?), + Resp3Frame::BlobString { data, .. } => Ok(String::from_utf8(data.to_vec())?), + Resp3Frame::Double { data, .. } => Ok(data.to_string()), + Resp3Frame::Number { data, .. } => Ok(data.to_string()), + Resp3Frame::Boolean { data, .. } => Ok(data.to_string()), + Resp3Frame::VerbatimString { data, .. } => Ok(String::from_utf8(data.to_vec())?), + Resp3Frame::BigNumber { data, .. } => Ok(String::from_utf8(data.to_vec())?), + _ => Err(RedisError::new( + RedisErrorKind::ProtocolError, + "Expected protocol string.", + )), + } +} + +/// Convert the frame to a `(channel, message)` tuple from the pubsub interface. 
+pub fn frame_to_pubsub(frame: Resp3Frame) -> Result<(String, RedisValue), RedisError> { if let Ok((channel, message)) = frame.parse_as_pubsub() { - Ok((channel, RedisValue::String(message))) + let channel = frame_into_string(channel)?; + let message = frame_to_single_result(message)?; + + Ok((channel, message)) } else { Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -230,193 +267,456 @@ pub fn frame_to_pubsub(frame: ProtocolFrame) -> Result<(String, RedisValue), Red } } +/// Attempt to parse a RESP3 frame as a pubsub message in the RESP2 format. +/// +/// This can be useful in cases where the codec layer automatically upgrades to RESP3, +/// but the contents of the pubsub message still use the RESP2 format. +pub fn parse_as_resp2_pubsub(frame: Resp3Frame) -> Result<(String, RedisValue), RedisError> { + // there's a few ways to do this, but i don't want to re-implement the logic in redis_protocol. + // the main difference between resp2 and resp3 here is the presence of a "pubsub" string at the + // beginning of the push array, so we just add that to the front here. + + let mut out = Vec::with_capacity(frame.len() + 1); + out.push(Resp3Frame::SimpleString { + data: PUBSUB_PUSH_PREFIX.into(), + attributes: None, + }); + + if let Resp3Frame::Push { data, .. } = frame { + out.extend(data); + let frame = Resp3Frame::Push { + data: out, + attributes: None, + }; + + frame_to_pubsub(frame) + } else { + Err(RedisError::new( + RedisErrorKind::ProtocolError, + "Invalid pubsub message. 
Expected push frame.", + )) + } +} + +#[cfg(not(feature = "ignore-auth-error"))] +pub fn check_resp2_auth_error(frame: Resp2Frame) -> Resp2Frame { + frame +} + +#[cfg(feature = "ignore-auth-error")] +pub fn check_resp2_auth_error(frame: Resp2Frame) -> Resp2Frame { + let is_auth_error = match frame { + Resp2Frame::Error(ref data) => *data == "ERR Client sent AUTH, but no password is set", + _ => false, + }; + + if is_auth_error { + Resp2Frame::SimpleString(OK.into()) + } else { + frame + } +} + #[cfg(not(feature = "ignore-auth-error"))] -pub fn check_auth_error(frame: ProtocolFrame) -> ProtocolFrame { +pub fn check_resp3_auth_error(frame: Resp3Frame) -> Resp3Frame { frame } #[cfg(feature = "ignore-auth-error")] -pub fn check_auth_error(frame: ProtocolFrame) -> ProtocolFrame { +pub fn check_resp3_auth_error(frame: Resp3Frame) -> Resp3Frame { let is_auth_error = match frame { - ProtocolFrame::Error(ref s) => s == "ERR Client sent AUTH, but no password is set", + Resp3Frame::SimpleError { ref data, .. } => *data == "ERR Client sent AUTH, but no password is set", _ => false, }; if is_auth_error { - ProtocolFrame::SimpleString("OK".into()) + Resp3Frame::SimpleString { + data: "OK".into(), + attributes: None, + } } else { frame } } +/// Try to parse the data as a string, and failing that return a byte slice. +pub fn string_or_bytes(data: Bytes) -> RedisValue { + if let Some(s) = Str::from_inner(data.clone()).ok() { + RedisValue::String(s) + } else { + RedisValue::Bytes(data) + } +} + +pub fn frame_to_bytes(frame: &Resp3Frame) -> Option { + match frame { + Resp3Frame::BigNumber { data, .. } => Some(data.clone()), + Resp3Frame::VerbatimString { data, .. } => Some(data.clone()), + Resp3Frame::BlobString { data, .. } => Some(data.clone()), + Resp3Frame::SimpleString { data, .. } => Some(data.clone()), + Resp3Frame::BlobError { data, .. } => Some(data.clone()), + Resp3Frame::SimpleError { data, .. 
} => Some(data.inner().clone()), + _ => None, + } +} + +pub fn frame_to_str(frame: &Resp3Frame) -> Option { + match frame { + Resp3Frame::BigNumber { data, .. } => Str::from_inner(data.clone()).ok(), + Resp3Frame::VerbatimString { data, .. } => Str::from_inner(data.clone()).ok(), + Resp3Frame::BlobString { data, .. } => Str::from_inner(data.clone()).ok(), + Resp3Frame::SimpleString { data, .. } => Str::from_inner(data.clone()).ok(), + Resp3Frame::BlobError { data, .. } => Str::from_inner(data.clone()).ok(), + Resp3Frame::SimpleError { data, .. } => Some(data.clone()), + _ => None, + } +} + +fn parse_nested_array(data: Vec) -> Result { + let mut out = Vec::with_capacity(data.len()); + + for frame in data.into_iter() { + out.push(frame_to_results(frame)?); + } + + if out.len() == 1 { + Ok(out.pop().unwrap()) + } else { + Ok(RedisValue::Array(out)) + } +} + +fn parse_nested_map(data: FrameMap) -> Result { + let mut out = HashMap::with_capacity(data.len()); + + // maybe make this smarter, but that would require changing the RedisMap type to use potentially non-hashable types as keys... + for (key, value) in data.into_iter() { + let key: RedisKey = frame_to_single_result(key)?.try_into()?; + let value = frame_to_results(value)?; + + out.insert(key, value); + } + + Ok(RedisMap { inner: out }) +} + /// Parse the protocol frame into a redis value, with support for arbitrarily nested arrays. /// /// If the array contains one element then that element will be returned. -pub fn frame_to_results(frame: ProtocolFrame) -> Result { +pub fn frame_to_results(frame: Resp3Frame) -> Result { let value = match frame { - ProtocolFrame::SimpleString(s) => { - if s.as_str() == QUEUED { + Resp3Frame::Null => RedisValue::Null, + Resp3Frame::SimpleString { data, .. 
} => { + let value = string_or_bytes(data); + + if value.as_str().map(|s| s == QUEUED).unwrap_or(false) { RedisValue::Queued } else { - s.into() + value } } - ProtocolFrame::BulkString(b) => { - if let Some(s) = str::from_utf8(&b).ok() { - RedisValue::String(s.to_owned()) + Resp3Frame::SimpleError { data, .. } => return Err(pretty_error(&data)), + Resp3Frame::BlobString { data, .. } => string_or_bytes(data), + Resp3Frame::BlobError { data, .. } => { + // errors don't have a great way to represent non-utf8 strings... + let parsed = String::from_utf8_lossy(&data); + return Err(pretty_error(&parsed)); + } + Resp3Frame::VerbatimString { data, .. } => string_or_bytes(data), + Resp3Frame::Number { data, .. } => data.into(), + Resp3Frame::Double { data, .. } => data.into(), + Resp3Frame::BigNumber { data, .. } => string_or_bytes(data), + Resp3Frame::Boolean { data, .. } => data.into(), + Resp3Frame::Array { data, .. } => parse_nested_array(data)?, + Resp3Frame::Push { data, .. } => parse_nested_array(data)?, + Resp3Frame::Set { data, .. } => { + let mut out = Vec::with_capacity(data.len()); + for frame in data.into_iter() { + out.push(frame_to_results(frame)?); + } + + RedisValue::Array(out) + } + Resp3Frame::Map { data, .. } => RedisValue::Map(parse_nested_map(data)?), + _ => { + return Err(RedisError::new( + RedisErrorKind::ProtocolError, + "Invalid response frame type.", + )) + } + }; + + Ok(value) +} + +/// Parse the protocol frame into a redis value, with support for arbitrarily nested arrays. +/// +/// Unlike `frame_to_results` this will not unwrap single-element arrays. +pub fn frame_to_results_raw(frame: Resp3Frame) -> Result { + let value = match frame { + Resp3Frame::Null => RedisValue::Null, + Resp3Frame::SimpleString { data, .. 
} => { + let value = string_or_bytes(data); + + if value.as_str().map(|s| s == QUEUED).unwrap_or(false) { + RedisValue::Queued } else { - RedisValue::Bytes(b) + value } } - ProtocolFrame::Integer(i) => i.into(), - ProtocolFrame::Null => RedisValue::Null, - ProtocolFrame::Array(frames) => { - let mut out = Vec::with_capacity(frames.len()); + Resp3Frame::SimpleError { data, .. } => return Err(pretty_error(&data)), + Resp3Frame::BlobString { data, .. } => string_or_bytes(data), + Resp3Frame::BlobError { data, .. } => { + // errors don't have a great way to represent non-utf8 strings... + let parsed = String::from_utf8_lossy(&data); + return Err(pretty_error(&parsed)); + } + Resp3Frame::VerbatimString { data, .. } => string_or_bytes(data), + Resp3Frame::Number { data, .. } => data.into(), + Resp3Frame::Double { data, .. } => data.into(), + Resp3Frame::BigNumber { data, .. } => string_or_bytes(data), + Resp3Frame::Boolean { data, .. } => data.into(), + Resp3Frame::Array { data, .. } | Resp3Frame::Push { data, .. } => { + let mut out = Vec::with_capacity(data.len()); + for frame in data.into_iter() { + out.push(frame_to_results_raw(frame)?); + } - for frame in frames.into_iter() { - out.push(frame_to_results(frame)?); + RedisValue::Array(out) + } + Resp3Frame::Set { data, .. } => { + let mut out = Vec::with_capacity(data.len()); + for frame in data.into_iter() { + out.push(frame_to_results_raw(frame)?); } - if out.len() == 1 { - out.pop().unwrap() - } else { - RedisValue::Array(out) + RedisValue::Array(out) + } + Resp3Frame::Map { data, .. 
} => { + let mut out = HashMap::with_capacity(data.len()); + for (key, value) in data.into_iter() { + let key: RedisKey = frame_to_single_result(key)?.try_into()?; + let value = frame_to_results_raw(value)?; + + out.insert(key, value); } + + RedisValue::Map(RedisMap { inner: out }) + } + _ => { + return Err(RedisError::new( + RedisErrorKind::ProtocolError, + "Invalid response frame type.", + )) } - ProtocolFrame::Error(s) => return Err(pretty_error(&s)), }; Ok(value) } -/// Parse the protocol frame into a single redis value, returning an error if the result contains nested arrays or an array with more than one value. +/// Parse the protocol frame into a single redis value, returning an error if the result contains nested arrays, an array with more than one value, or any other aggregate type. /// /// If the array only contains one value then that value will be returned. /// -/// This function is equivalent to [frame_to_results] but with an added validation layer if the result set is a nested array, etc. -pub fn frame_to_single_result(frame: ProtocolFrame) -> Result { +/// This function is equivalent to [frame_to_results] but with an added validation layer if the result set is a nested array, aggregate type, etc. +pub fn frame_to_single_result(frame: Resp3Frame) -> Result { match frame { - ProtocolFrame::SimpleString(s) => { - if s.as_str() == QUEUED { + Resp3Frame::SimpleString { data, .. } => { + let value = string_or_bytes(data); + + if value.as_str().map(|s| s == QUEUED).unwrap_or(false) { Ok(RedisValue::Queued) } else { - Ok(s.into()) + Ok(value) } } - ProtocolFrame::Integer(i) => Ok(i.into()), - ProtocolFrame::BulkString(b) => { - if let Some(s) = str::from_utf8(&b).ok() { - Ok(RedisValue::String(s.to_owned())) - } else { - Ok(RedisValue::Bytes(b)) - } + Resp3Frame::SimpleError { data, .. } => Err(pretty_error(&data)), + Resp3Frame::Number { data, .. } => Ok(data.into()), + Resp3Frame::Double { data, .. } => Ok(data.into()), + Resp3Frame::BigNumber { data, .. 
} => Ok(string_or_bytes(data)), + Resp3Frame::Boolean { data, .. } => Ok(data.into()), + Resp3Frame::VerbatimString { data, .. } => Ok(string_or_bytes(data)), + Resp3Frame::BlobString { data, .. } => Ok(string_or_bytes(data)), + Resp3Frame::BlobError { data, .. } => { + // errors don't have a great way to represent non-utf8 strings... + let parsed = String::from_utf8_lossy(&data); + Err(pretty_error(&parsed)) } - ProtocolFrame::Array(mut frames) => { - if frames.len() > 1 { + Resp3Frame::Array { mut data, .. } | Resp3Frame::Push { mut data, .. } => { + if data.len() > 1 { return Err(RedisError::new( RedisErrorKind::ProtocolError, "Could not convert multiple frames to RedisValue.", )); - } else if frames.is_empty() { + } else if data.is_empty() { return Ok(RedisValue::Null); } - let first_frame = frames.pop().unwrap(); - if first_frame.kind() == ProtocolFrameKind::Array || first_frame.kind() == ProtocolFrameKind::Error { + let first_frame = data.pop().unwrap(); + if first_frame.is_array() || first_frame.is_error() { // there shouldn't be errors buried in arrays, nor should there be more than one layer of nested arrays - return Err(RedisError::new(RedisErrorKind::ProtocolError, "Invalid nested array.")); + return Err(RedisError::new( + RedisErrorKind::ProtocolError, + "Invalid nested array or error.", + )); } frame_to_single_result(first_frame) } - ProtocolFrame::Null => Ok(RedisValue::Null), - ProtocolFrame::Error(s) => Err(pretty_error(&s)), + Resp3Frame::Map { .. } | Resp3Frame::Set { .. } => Err(RedisError::new( + RedisErrorKind::ProtocolError, + "Invalid aggregate type.", + )), + Resp3Frame::Null => Ok(RedisValue::Null), + _ => Err(RedisError::new(RedisErrorKind::ProtocolError, "Unexpected frame kind.")), } } -/// Convert a frame to a nested RedisMap. 
-pub fn frame_to_map(frame: ProtocolFrame) -> Result { - if let ProtocolFrame::Array(mut frames) = frame { - if frames.is_empty() { - return Ok(RedisMap::new()); - } - if frames.len() % 2 != 0 { - return Err(RedisError::new( - RedisErrorKind::ProtocolError, - "Expected an even number of frames.", - )); - } - - let mut inner = utils::new_map(frames.len() / 2); - while frames.len() >= 2 { - let value = frames.pop().unwrap(); - let key = match frames.pop().unwrap().as_str() { - Some(k) => k.to_owned(), - None => return Err(RedisError::new(RedisErrorKind::ProtocolError, "Expected string key.")), - }; - let value = frame_to_single_result(value)?; +/// Flatten a single nested layer of arrays or sets into one array. +pub fn flatten_frame(frame: Resp3Frame) -> Resp3Frame { + match frame { + Resp3Frame::Array { data, .. } => { + let count = data.iter().fold(0, |c, f| { + c + match f { + Resp3Frame::Push { ref data, .. } => data.len(), + Resp3Frame::Array { ref data, .. } => data.len(), + Resp3Frame::Set { ref data, .. } => data.len(), + _ => 1, + } + }); + + let mut out = Vec::with_capacity(count); + for frame in data.into_iter() { + match frame { + Resp3Frame::Push { data, .. } => out.extend(data), + Resp3Frame::Array { data, .. } => out.extend(data), + Resp3Frame::Set { data, .. } => out.extend(data), + _ => out.push(frame), + }; + } - inner.insert(key, value); + Resp3Frame::Array { + data: out, + attributes: None, + } } + Resp3Frame::Set { data, .. } => { + let count = data.iter().fold(0, |c, f| { + c + match f { + Resp3Frame::Array { ref data, .. } => data.len(), + Resp3Frame::Set { ref data, .. } => data.len(), + _ => 1, + } + }); + + let mut out = Vec::with_capacity(count); + for frame in data.into_iter() { + match frame { + Resp3Frame::Array { data, .. } => out.extend(data), + Resp3Frame::Set { data, .. 
} => out.extend(data), + _ => out.push(frame), + }; + } - Ok(RedisMap { inner }) - } else { - Err(RedisError::new( - RedisErrorKind::ProtocolError, - "Expected array of frames.", - )) + Resp3Frame::Array { + data: out, + attributes: None, + } + } + _ => frame, } } -/// Convert a redis array value to a redis map. -#[allow(dead_code)] -pub fn array_to_map(data: RedisValue) -> Result { - if let RedisValue::Array(mut values) = data { - if values.is_empty() { - return Ok(RedisMap::new()); - } - if values.len() % 2 != 0 { - return Err(RedisError::new( - RedisErrorKind::ProtocolError, - "Expected an even number of array frames.", - )); - } +/// Convert a frame to a nested RedisMap. +pub fn frame_to_map(frame: Resp3Frame) -> Result { + match frame { + Resp3Frame::Array { mut data, .. } => { + if data.is_empty() { + return Ok(RedisMap::new()); + } + if data.len() % 2 != 0 { + return Err(RedisError::new( + RedisErrorKind::ProtocolError, + "Expected an even number of frames.", + )); + } - let mut inner = utils::new_map(values.len() / 2); - while values.len() >= 2 { - let value = values.pop().unwrap(); - let key = match values.pop().unwrap().into_string() { - Some(k) => k, - None => return Err(RedisError::new(RedisErrorKind::ProtocolError, "Expected string key.")), - }; + let mut inner = HashMap::with_capacity(data.len() / 2); + while data.len() >= 2 { + let value = frame_to_results(data.pop().unwrap())?; + let key = frame_to_single_result(data.pop().unwrap())?.try_into()?; - inner.insert(key, value); - } + inner.insert(key, value); + } - Ok(RedisMap { inner }) - } else { - Err(RedisError::new( + Ok(RedisMap { inner }) + } + Resp3Frame::Map { data, .. 
} => parse_nested_map(data), + _ => Err(RedisError::new( RedisErrorKind::ProtocolError, - "Expected array of frames.", - )) + "Expected array or map frames.", + )), } } -pub fn frame_to_error(frame: &ProtocolFrame) -> Option { +pub fn frame_to_error(frame: &Resp3Frame) -> Option { match frame { - ProtocolFrame::Error(ref s) => Some(pretty_error(s)), + Resp3Frame::SimpleError { ref data, .. } => Some(pretty_error(data)), + Resp3Frame::BlobError { ref data, .. } => { + let parsed = String::from_utf8_lossy(data); + Some(pretty_error(&parsed)) + } _ => None, } } -pub fn value_to_outgoing_frame(value: &RedisValue) -> Result { +pub fn value_to_outgoing_resp2_frame(value: &RedisValue) -> Result { let frame = match value { - RedisValue::Integer(ref i) => ProtocolFrame::BulkString(i.to_string().into_bytes()), - RedisValue::String(ref s) => ProtocolFrame::BulkString(s.as_bytes().to_vec()), - RedisValue::Bytes(ref b) => ProtocolFrame::BulkString(b.to_vec()), - RedisValue::Queued => ProtocolFrame::BulkString(QUEUED.as_bytes().to_vec()), - RedisValue::Null => ProtocolFrame::Null, - // TODO implement when RESP3 support is in redis-protocol + RedisValue::Double(ref f) => Resp2Frame::BulkString(f.to_string().into()), + RedisValue::Boolean(ref b) => Resp2Frame::BulkString(b.to_string().into()), + RedisValue::Integer(ref i) => Resp2Frame::BulkString(i.to_string().into()), + RedisValue::String(ref s) => Resp2Frame::BulkString(s.inner().clone()), + RedisValue::Bytes(ref b) => Resp2Frame::BulkString(b.clone()), + RedisValue::Queued => Resp2Frame::BulkString(Bytes::from_static(QUEUED.as_bytes())), + RedisValue::Null => Resp2Frame::Null, + _ => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + format!("Invalid argument type: {}", value.kind()), + )) + } + }; + + Ok(frame) +} + +pub fn value_to_outgoing_resp3_frame(value: &RedisValue) -> Result { + let frame = match value { + RedisValue::Double(ref f) => Resp3Frame::BlobString { + data: f.to_string().into(), + 
attributes: None, + }, + RedisValue::Boolean(ref b) => Resp3Frame::BlobString { + data: b.to_string().into(), + attributes: None, + }, + RedisValue::Integer(ref i) => Resp3Frame::BlobString { + data: i.to_string().into(), + attributes: None, + }, + RedisValue::String(ref s) => Resp3Frame::BlobString { + data: s.inner().clone(), + attributes: None, + }, + RedisValue::Bytes(ref b) => Resp3Frame::BlobString { + data: b.clone(), + attributes: None, + }, + RedisValue::Queued => Resp3Frame::BlobString { + data: Bytes::from_static(QUEUED.as_bytes()), + attributes: None, + }, + RedisValue::Null => Resp3Frame::Null, _ => { return Err(RedisError::new( RedisErrorKind::InvalidArgument, @@ -431,7 +731,7 @@ pub fn value_to_outgoing_frame(value: &RedisValue) -> Result Result<(), RedisError> { match *value { RedisValue::String(ref resp) => { - if resp == OK { + if resp.deref() == OK { Ok(()) } else { Err(RedisError::new( @@ -447,34 +747,37 @@ pub fn expect_ok(value: &RedisValue) -> Result<(), RedisError> { } } -fn parse_u64(val: &ProtocolFrame) -> u64 { +fn parse_u64(val: &Resp3Frame) -> u64 { match *val { - ProtocolFrame::Integer(i) => { - if i < 0 { + Resp3Frame::Number { ref data, .. } => { + if *data < 0 { 0 } else { - i as u64 + *data as u64 } } - ProtocolFrame::SimpleString(ref s) => s.parse::().ok().unwrap_or(0), - ProtocolFrame::BulkString(ref s) => str::from_utf8(s).ok().and_then(|s| s.parse::().ok()).unwrap_or(0), + Resp3Frame::Double { ref data, .. } => *data as u64, + Resp3Frame::BlobString { ref data, .. } | Resp3Frame::SimpleString { ref data, .. } => str::from_utf8(data) + .ok() + .and_then(|s| s.parse::().ok()) + .unwrap_or(0), _ => 0, } } -fn parse_f64(val: &ProtocolFrame) -> f64 { +fn parse_f64(val: &Resp3Frame) -> f64 { match *val { - ProtocolFrame::Integer(i) => i as f64, - ProtocolFrame::SimpleString(ref s) => s.parse::().ok().unwrap_or(0.0), - ProtocolFrame::BulkString(ref s) => str::from_utf8(s) + Resp3Frame::Number { ref data, .. 
} => *data as f64, + Resp3Frame::Double { ref data, .. } => *data, + Resp3Frame::BlobString { ref data, .. } | Resp3Frame::SimpleString { ref data, .. } => str::from_utf8(data) .ok() - .and_then(|s| s.parse::().ok()) + .and_then(|s| redis_string_to_f64(s).ok()) .unwrap_or(0.0), _ => 0.0, } } -fn parse_db_memory_stats(data: &Vec) -> Result { +fn parse_db_memory_stats(data: &Vec) -> Result { if data.len() % 2 != 0 { return Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -499,7 +802,7 @@ fn parse_db_memory_stats(data: &Vec) -> Result stats.peak_allocated = parse_u64(value), "total.allocated" => stats.total_allocated = parse_u64(value), @@ -530,7 +833,7 @@ fn parse_memory_stat_field(stats: &mut MemoryStats, key: &str, value: &ProtocolF } } -pub fn parse_memory_stats(data: &Vec) -> Result { +pub fn parse_memory_stats(data: &Vec) -> Result { if data.len() % 2 != 0 { return Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -555,7 +858,7 @@ pub fn parse_memory_stats(data: &Vec) -> Result inner, + Resp3Frame::Array { ref data, .. } => data, _ => continue, }; let parsed = parse_db_memory_stats(inner)?; @@ -569,11 +872,11 @@ pub fn parse_memory_stats(data: &Vec) -> Result Result, RedisError> { - if let ProtocolFrame::Array(ref frames) = value { - let mut out = Vec::with_capacity(frames.len()); +fn parse_acl_getuser_flag(value: &Resp3Frame) -> Result, RedisError> { + if let Resp3Frame::Array { ref data, .. 
} = value { + let mut out = Vec::with_capacity(data.len()); - for frame in frames.iter() { + for frame in data.iter() { let flag = match frame.as_str() { Some(s) => match s.as_ref() { "on" => AclUserFlag::On, @@ -599,11 +902,11 @@ fn parse_acl_getuser_flag(value: &ProtocolFrame) -> Result, Red } } -fn frames_to_strings(frames: &ProtocolFrame) -> Result, RedisError> { - if let ProtocolFrame::Array(ref frames) = frames { - let mut out = Vec::with_capacity(frames.len()); +fn frames_to_strings(frames: &Resp3Frame) -> Result, RedisError> { + if let Resp3Frame::Array { ref data, .. } = frames { + let mut out = Vec::with_capacity(data.len()); - for frame in frames.iter() { + for frame in data.iter() { let val = match frame.as_str() { Some(v) => v.to_owned(), None => continue, @@ -621,7 +924,7 @@ fn frames_to_strings(frames: &ProtocolFrame) -> Result, RedisError> } } -fn parse_acl_getuser_field(user: &mut AclUser, key: &str, value: &ProtocolFrame) -> Result<(), RedisError> { +fn parse_acl_getuser_field(user: &mut AclUser, key: &str, value: &Resp3Frame) -> Result<(), RedisError> { match key.as_ref() { "passwords" => user.passwords = frames_to_strings(value)?, "keys" => user.keys = frames_to_strings(value)?, @@ -642,7 +945,36 @@ fn parse_acl_getuser_field(user: &mut AclUser, key: &str, value: &ProtocolFrame) Ok(()) } -pub fn parse_acl_getuser_frames(frames: Vec) -> Result { +pub fn frame_map_or_set_to_nested_array(frame: Resp3Frame) -> Result { + match frame { + Resp3Frame::Map { data, .. } => { + let mut out = Vec::with_capacity(data.len() * 2); + for (key, value) in data.into_iter() { + out.push(key); + out.push(frame_map_or_set_to_nested_array(value)?); + } + + Ok(Resp3Frame::Array { + data: out, + attributes: None, + }) + } + Resp3Frame::Set { data, .. 
} => { + let mut out = Vec::with_capacity(data.len()); + for frame in data.into_iter() { + out.push(frame_map_or_set_to_nested_array(frame)?); + } + + Ok(Resp3Frame::Array { + data: out, + attributes: None, + }) + } + _ => Ok(frame), + } +} + +pub fn parse_acl_getuser_frames(frames: Vec) -> Result { if frames.len() % 2 != 0 || frames.len() > 10 { return Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -667,7 +999,7 @@ pub fn parse_acl_getuser_frames(frames: Vec) -> Result) -> Result { +fn parse_slowlog_entry(frames: Vec) -> Result { if frames.len() < 4 { return Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -676,11 +1008,11 @@ fn parse_slowlog_entry(frames: Vec) -> Result *i, + Resp3Frame::Number { ref data, .. } => *data, _ => return Err(RedisError::new(RedisErrorKind::ProtocolError, "Expected integer ID.")), }; let timestamp = match frames[1] { - ProtocolFrame::Integer(ref i) => *i, + Resp3Frame::Number { ref data, .. } => *data, _ => { return Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -689,7 +1021,7 @@ fn parse_slowlog_entry(frames: Vec) -> Result *i as u64, + Resp3Frame::Number { ref data, .. } => *data as u64, _ => { return Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -698,7 +1030,10 @@ fn parse_slowlog_entry(frames: Vec) -> Result args.iter().filter_map(|a| a.as_str().map(|s| s.to_owned())).collect(), + Resp3Frame::Array { ref data, .. } => data + .iter() + .filter_map(|frame| frame.as_str().map(|s| s.to_owned())) + .collect(), _ => { return Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -742,12 +1077,12 @@ fn parse_slowlog_entry(frames: Vec) -> Result) -> Result, RedisError> { +pub fn parse_slowlog_entries(frames: Vec) -> Result, RedisError> { let mut out = Vec::with_capacity(frames.len()); for frame in frames.into_iter() { - if let ProtocolFrame::Array(frames) = frame { - out.push(parse_slowlog_entry(frames)?); + if let Resp3Frame::Array { data, .. 
} = frame { + out.push(parse_slowlog_entry(data)?); } else { return Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -793,7 +1128,7 @@ fn parse_cluster_info_line(info: &mut ClusterInfo, line: &str) -> Result<(), Red Ok(()) } -pub fn parse_cluster_info(data: ProtocolFrame) -> Result { +pub fn parse_cluster_info(data: Resp3Frame) -> Result { if let Some(data) = data.as_str() { let mut out = ClusterInfo::default(); @@ -812,19 +1147,27 @@ pub fn parse_cluster_info(data: ProtocolFrame) -> Result Result { - if let Some(s) = frame.as_str() { - utils::redis_string_to_f64(s) - } else { - Err(RedisError::new(RedisErrorKind::ProtocolError, "Expected bulk string.")) +fn frame_to_f64(frame: &Resp3Frame) -> Result { + match frame { + Resp3Frame::Double { ref data, .. } => Ok(*data), + _ => { + if let Some(s) = frame.as_str() { + utils::redis_string_to_f64(s) + } else { + Err(RedisError::new( + RedisErrorKind::ProtocolError, + "Expected bulk string or double.", + )) + } + } } } -pub fn parse_geo_position(frame: &ProtocolFrame) -> Result { - if let ProtocolFrame::Array(ref frames) = frame { - if frames.len() == 2 { - let longitude = frame_to_f64(&frames[0])?; - let latitude = frame_to_f64(&frames[1])?; +pub fn parse_geo_position(frame: &Resp3Frame) -> Result { + if let Resp3Frame::Array { ref data, .. 
} = frame { + if data.len() == 2 { + let longitude = frame_to_f64(&data[0])?; + let latitude = frame_to_f64(&data[1])?; Ok(GeoPosition { longitude, latitude }) } else { @@ -838,7 +1181,7 @@ pub fn parse_geo_position(frame: &ProtocolFrame) -> Result, len: usize) -> Result<(), RedisError> { +fn assert_frame_len(frames: &Vec, len: usize) -> Result<(), RedisError> { if frames.len() != len { Err(RedisError::new( RedisErrorKind::ProtocolError, @@ -849,84 +1192,87 @@ fn assert_frame_len(frames: &Vec, len: usize) -> Result<(), Redis } } -fn parse_geo_member(frame: &ProtocolFrame) -> Result { +fn parse_geo_member(frame: &Resp3Frame) -> Result { frame .as_str() .ok_or(RedisError::new(RedisErrorKind::ProtocolError, "Expected string")) .map(|s| s.into()) } -fn parse_geo_dist(frame: &ProtocolFrame) -> Result { - frame - .as_str() - .ok_or(RedisError::new(RedisErrorKind::ProtocolError, "Expected double.")) - .and_then(|s| utils::redis_string_to_f64(s)) +fn parse_geo_dist(frame: &Resp3Frame) -> Result { + match frame { + Resp3Frame::Double { ref data, .. } => Ok(*data), + _ => frame + .as_str() + .ok_or(RedisError::new(RedisErrorKind::ProtocolError, "Expected double.")) + .and_then(|s| utils::redis_string_to_f64(s)), + } } -fn parse_geo_hash(frame: &ProtocolFrame) -> Result { - if let ProtocolFrame::Integer(ref i) = frame { - Ok(*i) +fn parse_geo_hash(frame: &Resp3Frame) -> Result { + if let Resp3Frame::Number { ref data, .. } = frame { + Ok(*data) } else { Err(RedisError::new(RedisErrorKind::ProtocolError, "Expected integer.")) } } pub fn parse_georadius_info( - frame: &ProtocolFrame, + frame: &Resp3Frame, withcoord: bool, withdist: bool, withhash: bool, ) -> Result { - if let ProtocolFrame::Array(ref frames) = frame { + if let Resp3Frame::Array { ref data, .. 
} = frame { let mut out = GeoRadiusInfo::default(); if withcoord && withdist && withhash { // 4 elements: member, dist, hash, position - let _ = assert_frame_len(frames, 4)?; + let _ = assert_frame_len(data, 4)?; - out.member = parse_geo_member(&frames[0])?; - out.distance = Some(parse_geo_dist(&frames[1])?); - out.hash = Some(parse_geo_hash(&frames[2])?); - out.position = Some(parse_geo_position(&frames[3])?); + out.member = parse_geo_member(&data[0])?; + out.distance = Some(parse_geo_dist(&data[1])?); + out.hash = Some(parse_geo_hash(&data[2])?); + out.position = Some(parse_geo_position(&data[3])?); } else if withcoord && withdist { // 3 elements: member, dist, position - let _ = assert_frame_len(frames, 3)?; + let _ = assert_frame_len(data, 3)?; - out.member = parse_geo_member(&frames[0])?; - out.distance = Some(parse_geo_dist(&frames[1])?); - out.position = Some(parse_geo_position(&frames[2])?); + out.member = parse_geo_member(&data[0])?; + out.distance = Some(parse_geo_dist(&data[1])?); + out.position = Some(parse_geo_position(&data[2])?); } else if withcoord && withhash { // 3 elements: member, hash, position - let _ = assert_frame_len(frames, 3)?; + let _ = assert_frame_len(data, 3)?; - out.member = parse_geo_member(&frames[0])?; - out.hash = Some(parse_geo_hash(&frames[1])?); - out.position = Some(parse_geo_position(&frames[2])?); + out.member = parse_geo_member(&data[0])?; + out.hash = Some(parse_geo_hash(&data[1])?); + out.position = Some(parse_geo_position(&data[2])?); } else if withdist && withhash { // 3 elements: member, dist, hash - let _ = assert_frame_len(frames, 3)?; + let _ = assert_frame_len(data, 3)?; - out.member = parse_geo_member(&frames[0])?; - out.distance = Some(parse_geo_dist(&frames[1])?); - out.hash = Some(parse_geo_hash(&frames[2])?); + out.member = parse_geo_member(&data[0])?; + out.distance = Some(parse_geo_dist(&data[1])?); + out.hash = Some(parse_geo_hash(&data[2])?); } else if withcoord { // 2 elements: member, position - let _ = 
assert_frame_len(frames, 2)?; + let _ = assert_frame_len(data, 2)?; - out.member = parse_geo_member(&frames[0])?; - out.position = Some(parse_geo_position(&frames[1])?); + out.member = parse_geo_member(&data[0])?; + out.position = Some(parse_geo_position(&data[1])?); } else if withdist { // 2 elements: member, dist - let _ = assert_frame_len(frames, 2)?; + let _ = assert_frame_len(data, 2)?; - out.member = parse_geo_member(&frames[0])?; - out.distance = Some(parse_geo_dist(&frames[1])?); + out.member = parse_geo_member(&data[0])?; + out.distance = Some(parse_geo_dist(&data[1])?); } else if withhash { // 2 elements: member, hash - let _ = assert_frame_len(frames, 2)?; + let _ = assert_frame_len(data, 2)?; - out.member = parse_geo_member(&frames[0])?; - out.hash = Some(parse_geo_hash(&frames[1])?); + out.member = parse_geo_member(&data[0])?; + out.hash = Some(parse_geo_hash(&data[1])?); } Ok(out) @@ -949,15 +1295,15 @@ pub fn parse_georadius_info( } pub fn parse_georadius_result( - frame: ProtocolFrame, + frame: Resp3Frame, withcoord: bool, withdist: bool, withhash: bool, ) -> Result, RedisError> { - if let ProtocolFrame::Array(frames) = frame { - let mut out = Vec::with_capacity(frames.len()); + if let Resp3Frame::Array { data, .. } = frame { + let mut out = Vec::with_capacity(data.len()); - for frame in frames.into_iter() { + for frame in data.into_iter() { out.push(parse_georadius_info(&frame, withcoord, withdist, withhash)?); } @@ -967,8 +1313,29 @@ pub fn parse_georadius_result( } } +/// Flatten a nested array of values into one array. 
+pub fn flatten_redis_value(value: RedisValue) -> RedisValue { + if let RedisValue::Array(values) = value { + let mut out = Vec::with_capacity(values.len()); + for value in values.into_iter() { + let flattened = flatten_redis_value(value); + if let RedisValue::Array(flattened) = flattened { + out.extend(flattened); + } else { + out.push(flattened); + } + } + + RedisValue::Array(out) + } else { + value + } +} + /// Convert a redis value to an array of (value, score) tuples. pub fn value_to_zset_result(value: RedisValue) -> Result, RedisError> { + let value = flatten_redis_value(value); + if let RedisValue::Array(mut values) = value { if values.is_empty() { return Ok(Vec::new()); @@ -1017,9 +1384,15 @@ fn i64_size(i: i64) -> usize { #[cfg(any(feature = "blocking-encoding", feature = "partial-tracing", feature = "full-tracing"))] pub fn arg_size(value: &RedisValue) -> usize { match value { + // use the RESP2 size + RedisValue::Boolean(_) => 5, + // FIXME make this more accurate by casting to an i64 and using `digits_in_number` + // the tricky part is doing so without allocating and without any loss in precision, but + // this is only used for logging and tracing + RedisValue::Double(_) => 10, RedisValue::Null => 3, RedisValue::Integer(ref i) => i64_size(*i), - RedisValue::String(ref s) => s.as_bytes().len(), + RedisValue::String(ref s) => s.inner().len(), RedisValue::Bytes(ref b) => b.len(), RedisValue::Array(ref arr) => args_size(arr), RedisValue::Map(ref map) => map @@ -1031,14 +1404,27 @@ pub fn arg_size(value: &RedisValue) -> usize { } #[cfg(any(feature = "blocking-encoding", feature = "partial-tracing", feature = "full-tracing"))] -pub fn frame_size(frame: &Frame) -> usize { +pub fn resp2_frame_size(frame: &Resp2Frame) -> usize { match frame { - Frame::Integer(ref i) => i64_size(*i), - Frame::Null => 3, - Frame::Error(ref s) => s.as_bytes().len(), - Frame::SimpleString(ref s) => s.as_bytes().len(), - Frame::BulkString(ref b) => b.len(), - Frame::Array(ref a) => 
a.iter().fold(0, |c, f| c + frame_size(f)), + Resp2Frame::Integer(ref i) => i64_size(*i), + Resp2Frame::Null => 3, + Resp2Frame::Error(ref s) => s.as_bytes().len(), + Resp2Frame::SimpleString(ref s) => s.len(), + Resp2Frame::BulkString(ref b) => b.len(), + Resp2Frame::Array(ref a) => a.iter().fold(0, |c, f| c + resp2_frame_size(f)), + } +} + +#[cfg(any(feature = "blocking-encoding", feature = "partial-tracing", feature = "full-tracing"))] +pub fn resp3_frame_size(frame: &Resp3Frame) -> usize { + frame.encode_len().unwrap_or(0) +} + +#[cfg(any(feature = "blocking-encoding", feature = "partial-tracing", feature = "full-tracing"))] +pub fn frame_size(frame: &ProtocolFrame) -> usize { + match frame { + ProtocolFrame::Resp3(f) => resp3_frame_size(f), + ProtocolFrame::Resp2(f) => resp2_frame_size(f), } } @@ -1047,52 +1433,164 @@ pub fn args_size(args: &Vec) -> usize { args.iter().fold(0, |c, arg| c + arg_size(arg)) } -pub fn command_to_frame(command: &RedisCommand) -> Result { - if let RedisCommandKind::_Custom(ref kind) = command.kind { - let parts: Vec<&str> = kind.cmd.trim().split(" ").collect(); - let mut bulk_strings = Vec::with_capacity(parts.len() + command.args.len()); +fn serialize_hello(command: &RedisCommand, version: &RespVersion) -> Result { + let auth = if command.args.len() == 2 { + // has username and password + let username = match command.args[0].as_bytes_str() { + Some(username) => username, + None => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "Invalid username. Expected string.", + )); + } + }; + let password = match command.args[1].as_bytes_str() { + Some(password) => password, + None => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "Invalid password. 
Expected string.", + )); + } + }; - for part in parts.into_iter() { - bulk_strings.push(ProtocolFrame::BulkString(part.as_bytes().to_vec())); + Some(Auth { username, password }) + } else if command.args.len() == 1 { + // just has a password (assume the default user) + let password = match command.args[0].as_bytes_str() { + Some(password) => password, + None => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "Invalid password. Expected string.", + )); + } + }; + + Some(Auth::from_password(password)) + } else { + None + }; + + Ok(Resp3Frame::Hello { + version: version.clone(), + auth, + }) +} + +pub fn command_to_resp3_frame(command: &RedisCommand) -> Result { + match command.kind { + RedisCommandKind::_Custom(ref kind) => { + let parts: Vec<&str> = kind.cmd.trim().split(" ").collect(); + let mut bulk_strings = Vec::with_capacity(parts.len() + command.args.len()); + + for part in parts.into_iter() { + bulk_strings.push(Resp3Frame::BlobString { + data: part.as_bytes().to_vec().into(), + attributes: None, + }); + } + for value in command.args.iter() { + bulk_strings.push(value_to_outgoing_resp3_frame(value)?); + } + + Ok(Resp3Frame::Array { + data: bulk_strings, + attributes: None, + }) } - for value in command.args.iter() { - bulk_strings.push(value_to_outgoing_frame(value)?); + RedisCommandKind::Hello(ref version) => serialize_hello(command, version), + _ => { + let mut bulk_strings = Vec::with_capacity(command.args.len() + 2); + + bulk_strings.push(Resp3Frame::BlobString { + data: command.kind.cmd_str().into_inner(), + attributes: None, + }); + + if let Some(subcommand) = command.kind.subcommand_str() { + bulk_strings.push(Resp3Frame::BlobString { + data: Bytes::from_static(subcommand.as_bytes()), + attributes: None, + }); + } + for value in command.args.iter() { + bulk_strings.push(value_to_outgoing_resp3_frame(value)?); + } + + Ok(Resp3Frame::Array { + data: bulk_strings, + attributes: None, + }) } + } +} - 
Ok(ProtocolFrame::Array(bulk_strings)) - } else { - let mut bulk_strings = Vec::with_capacity(command.args.len() + 2); +pub fn command_to_resp2_frame(command: &RedisCommand) -> Result { + match command.kind { + RedisCommandKind::_Custom(ref kind) => { + let parts: Vec<&str> = kind.cmd.trim().split(" ").collect(); + let mut bulk_strings = Vec::with_capacity(parts.len() + command.args.len()); - let cmd = command.kind.cmd_str().as_bytes(); - bulk_strings.push(ProtocolFrame::BulkString(cmd.to_vec())); + for part in parts.into_iter() { + bulk_strings.push(Resp2Frame::BulkString(part.as_bytes().to_vec().into())); + } + for value in command.args.iter() { + bulk_strings.push(value_to_outgoing_resp2_frame(value)?); + } - if let Some(subcommand) = command.kind.subcommand_str() { - bulk_strings.push(ProtocolFrame::BulkString(subcommand.as_bytes().to_vec())); + Ok(Resp2Frame::Array(bulk_strings)) } - for value in command.args.iter() { - bulk_strings.push(value_to_outgoing_frame(value)?); + _ => { + let mut bulk_strings = Vec::with_capacity(command.args.len() + 2); + + bulk_strings.push(Resp2Frame::BulkString(command.kind.cmd_str().into_inner())); + if let Some(subcommand) = command.kind.subcommand_str() { + bulk_strings.push(Resp2Frame::BulkString(Bytes::from_static(subcommand.as_bytes()))); + } + for value in command.args.iter() { + bulk_strings.push(value_to_outgoing_resp2_frame(value)?); + } + + Ok(Resp2Frame::Array(bulk_strings)) } + } +} - Ok(ProtocolFrame::Array(bulk_strings)) +/// Serialize the command as a protocol frame. 
+pub fn command_to_frame(command: &RedisCommand, is_resp3: bool) -> Result { + if is_resp3 || command.kind.is_hello() { + command_to_resp3_frame(command).map(|c| c.into()) + } else { + command_to_resp2_frame(command).map(|c| c.into()) } } #[cfg(test)] mod tests { use super::*; - use std::collections::HashMap; - fn str_to_f(s: &str) -> ProtocolFrame { - ProtocolFrame::SimpleString(s.to_owned()) + fn str_to_f(s: &str) -> Resp3Frame { + Resp3Frame::SimpleString { + data: s.to_owned().into(), + attributes: None, + } } - fn str_to_bs(s: &str) -> ProtocolFrame { - ProtocolFrame::BulkString(s.as_bytes().to_vec()) + fn str_to_bs(s: &str) -> Resp3Frame { + Resp3Frame::BlobString { + data: s.to_owned().into(), + attributes: None, + } } - fn int_to_f(i: i64) -> ProtocolFrame { - ProtocolFrame::Integer(i) + fn int_to_f(i: i64) -> Resp3Frame { + Resp3Frame::Number { + data: i, + attributes: None, + } } fn string_vec(d: Vec<&str>) -> Vec { @@ -1102,7 +1600,7 @@ mod tests { #[test] fn should_parse_memory_stats() { // better from()/into() interfaces for frames coming in the next redis-protocol version... 
- let frames: Vec = vec![ + let frames: Vec = vec![ str_to_f("peak.allocated"), int_to_f(934192), str_to_f("total.allocated"), @@ -1120,12 +1618,15 @@ mod tests { str_to_f("lua.caches"), int_to_f(0), str_to_f("db.0"), - ProtocolFrame::Array(vec![ - str_to_f("overhead.hashtable.main"), - int_to_f(72), - str_to_f("overhead.hashtable.expires"), - int_to_f(0), - ]), + Resp3Frame::Array { + data: vec![ + str_to_f("overhead.hashtable.main"), + int_to_f(72), + str_to_f("overhead.hashtable.expires"), + int_to_f(0), + ], + attributes: None, + }, str_to_f("overhead.total"), int_to_f(830480), str_to_f("keys.count"), @@ -1223,18 +1724,30 @@ mod tests { let input = vec![ str_to_bs("flags"), - ProtocolFrame::Array(vec![str_to_bs("on")]), + Resp3Frame::Array { + data: vec![str_to_bs("on")], + attributes: None, + }, str_to_bs("passwords"), - ProtocolFrame::Array(vec![ - str_to_bs("c56e8629954a900e993e84ed3d4b134b9450da1b411a711d047d547808c3ece5"), - str_to_bs("39b039a94deaa548cf6382282c4591eccdc648706f9d608eceb687d452a31a45"), - ]), + Resp3Frame::Array { + data: vec![ + str_to_bs("c56e8629954a900e993e84ed3d4b134b9450da1b411a711d047d547808c3ece5"), + str_to_bs("39b039a94deaa548cf6382282c4591eccdc648706f9d608eceb687d452a31a45"), + ], + attributes: None, + }, str_to_bs("commands"), str_to_bs("-@all +@sortedset +@geo +config|get"), str_to_bs("keys"), - ProtocolFrame::Array(vec![str_to_bs("a"), str_to_bs("b"), str_to_bs("c")]), + Resp3Frame::Array { + data: vec![str_to_bs("a"), str_to_bs("b"), str_to_bs("c")], + attributes: None, + }, str_to_bs("channels"), - ProtocolFrame::Array(vec![str_to_bs("c1"), str_to_bs("c2")]), + Resp3Frame::Array { + data: vec![str_to_bs("c1"), str_to_bs("c2")], + attributes: None, + }, ]; let actual = parse_acl_getuser_frames(input).unwrap(); @@ -1268,18 +1781,30 @@ mod tests { */ let input = vec![ - ProtocolFrame::Array(vec![ - int_to_f(14), - int_to_f(1309448221), - int_to_f(15), - ProtocolFrame::Array(vec![str_to_bs("ping")]), - ]), - 
ProtocolFrame::Array(vec![ - int_to_f(13), - int_to_f(1309448128), - int_to_f(30), - ProtocolFrame::Array(vec![str_to_bs("slowlog"), str_to_bs("get"), str_to_bs("100")]), - ]), + Resp3Frame::Array { + data: vec![ + int_to_f(14), + int_to_f(1309448221), + int_to_f(15), + Resp3Frame::Array { + data: vec![str_to_bs("ping")], + attributes: None, + }, + ], + attributes: None, + }, + Resp3Frame::Array { + data: vec![ + int_to_f(13), + int_to_f(1309448128), + int_to_f(30), + Resp3Frame::Array { + data: vec![str_to_bs("slowlog"), str_to_bs("get"), str_to_bs("100")], + attributes: None, + }, + ], + attributes: None, + }, ]; let actual = parse_slowlog_entries(input).unwrap(); @@ -1326,22 +1851,34 @@ mod tests { */ let input = vec![ - ProtocolFrame::Array(vec![ - int_to_f(14), - int_to_f(1309448221), - int_to_f(15), - ProtocolFrame::Array(vec![str_to_bs("ping")]), - str_to_bs("127.0.0.1:58217"), - str_to_bs("worker-123"), - ]), - ProtocolFrame::Array(vec![ - int_to_f(13), - int_to_f(1309448128), - int_to_f(30), - ProtocolFrame::Array(vec![str_to_bs("slowlog"), str_to_bs("get"), str_to_bs("100")]), - str_to_bs("127.0.0.1:58217"), - str_to_bs("worker-123"), - ]), + Resp3Frame::Array { + data: vec![ + int_to_f(14), + int_to_f(1309448221), + int_to_f(15), + Resp3Frame::Array { + data: vec![str_to_bs("ping")], + attributes: None, + }, + str_to_bs("127.0.0.1:58217"), + str_to_bs("worker-123"), + ], + attributes: None, + }, + Resp3Frame::Array { + data: vec![ + int_to_f(13), + int_to_f(1309448128), + int_to_f(30), + Resp3Frame::Array { + data: vec![str_to_bs("slowlog"), str_to_bs("get"), str_to_bs("100")], + attributes: None, + }, + str_to_bs("127.0.0.1:58217"), + str_to_bs("worker-123"), + ], + attributes: None, + }, ]; let actual = parse_slowlog_entries(input).unwrap(); @@ -1395,7 +1932,11 @@ cluster_stats_messages_received:1483968"; cluster_stats_messages_received: 1483968, }; - let actual = parse_cluster_info(ProtocolFrame::BulkString(input.as_bytes().to_vec())).unwrap(); + let 
actual = parse_cluster_info(Resp3Frame::BlobString { + data: input.as_bytes().into(), + attributes: None, + }) + .unwrap(); assert_eq!(actual, expected); } @@ -1698,13 +2239,13 @@ b8553a4fae8ae99fca716d423b14875ebb10fefe quux.use2.cache.amazonaws.com:6379@1122 ], ); - let actual = match parse_cluster_nodes(status.to_owned()) { + let actual = match parse_cluster_nodes(status) { Ok(h) => h, Err(e) => panic!("{}", e), }; assert_eq!(actual, expected); - let cache = ClusterKeyCache::new(Some(status.to_owned())).expect("Failed to build cluster cache"); + let cache = ClusterKeyCache::new(Some(status)).expect("Failed to build cluster cache"); let slot = cache.get_server(8246).unwrap(); assert_eq!(slot.server.as_str(), "quux.use2.cache.amazonaws.com:6379"); let slot = cache.get_server(1697).unwrap(); @@ -1753,7 +2294,7 @@ e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001 myself,master - 0 0 1 c }], ); - let actual = match parse_cluster_nodes(status.to_owned()) { + let actual = match parse_cluster_nodes(status) { Ok(h) => h, Err(e) => panic!("{}", e), }; @@ -1796,7 +2337,7 @@ b4fa5337b58e02673f961e22c9557e81dda4b559 bar.cache.amazonaws.com:6379@1122 mysel }], ); - let actual = match parse_cluster_nodes(status.to_owned()) { + let actual = match parse_cluster_nodes(status) { Ok(h) => h, Err(e) => panic!("{}", e), }; @@ -1846,7 +2387,7 @@ b4fa5337b58e02673f961e22c9557e81dda4b559 bar.cache.amazonaws.com:6379@1122 mysel }], ); - let actual = match parse_cluster_nodes(status.to_owned()) { + let actual = match parse_cluster_nodes(status) { Ok(h) => h, Err(e) => panic!("{}", e), }; diff --git a/src/sentinel.rs b/src/sentinel.rs deleted file mode 100644 index 6cd88ecd..00000000 --- a/src/sentinel.rs +++ /dev/null @@ -1,724 +0,0 @@ -use crate::client::RedisClient; -use crate::commands; -use crate::error::RedisError; -use crate::modules::inner::RedisClientInner; -use crate::multiplexer::commands as multiplexer_commands; -use crate::multiplexer::utils as multiplexer_utils; -use 
crate::protocol::tls::TlsConfig; -use crate::types::{ - AclRule, AclUser, Blocking, ClientKillFilter, ClientKillType, ClientPauseKind, ClientState, ConnectHandle, - InfoKind, MultipleStrings, ReconnectPolicy, RedisConfig, RedisKey, RedisMap, RedisResponse, RedisValue, - SentinelFailureKind, ServerConfig, ShutdownFlags, Stats, -}; -use crate::utils; -use futures::{Stream, StreamExt}; -use std::convert::TryInto; -use std::fmt; -use std::net::IpAddr; -use std::sync::Arc; -use tokio::sync::mpsc::unbounded_channel; -use tokio_stream::wrappers::UnboundedReceiverStream; - -/// Configuration options for sentinel clients. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct SentinelConfig { - /// The hostname for the sentinel node. - /// - /// Default: `127.0.0.1` - pub host: String, - /// The port on which the sentinel node is listening. - /// - /// Default: `26379` - pub port: u16, - /// An optional ACL username for the client to use when authenticating. If ACL rules are not configured this should be `None`. - /// - /// Default: `None` - pub username: Option, - /// An optional password for the client to use when authenticating. - /// - /// Default: `None` - pub password: Option, - /// TLS configuration fields. If `None` the connection will not use TLS. - /// - /// Default: `None` - #[cfg(feature = "enable-tls")] - #[cfg_attr(docsrs, doc(cfg(feature = "enable-tls")))] - pub tls: Option, - /// Whether or not to enable tracing for this client. 
- /// - /// Default: `false` - #[cfg(feature = "partial-tracing")] - #[cfg_attr(docsrs, doc(cfg(feature = "partial-tracing")))] - pub tracing: bool, -} - -impl Default for SentinelConfig { - fn default() -> Self { - SentinelConfig { - host: "127.0.0.1".into(), - port: 26379, - username: None, - password: None, - #[cfg(feature = "enable-tls")] - tls: None, - #[cfg(feature = "partial-tracing")] - tracing: false, - } - } -} - -#[doc(hidden)] -impl From for RedisConfig { - fn from(config: SentinelConfig) -> Self { - RedisConfig { - server: ServerConfig::Centralized { - host: config.host, - port: config.port, - }, - fail_fast: true, - pipeline: false, - blocking: Blocking::Block, - username: config.username, - password: config.password, - #[cfg(feature = "enable-tls")] - tls: config.tls, - #[cfg(feature = "partial-tracing")] - tracing: config.tracing, - } - } -} - -/// A struct for interacting directly with Sentinel nodes. -/// -/// This struct **will not** communicate with Redis servers behind the sentinel interface, but rather with the sentinel nodes themselves. Callers should use the [RedisClient](crate::client::RedisClient) interface with a [ServerConfig::Sentinel](crate::types::ServerConfig::Sentinel) for interacting with Redis services behind a sentinel layer. -/// -/// See the [sentinel API docs](https://redis.io/topics/sentinel#sentinel-api) for more information. 
-#[derive(Clone)] -pub struct SentinelClient { - inner: Arc, -} - -impl fmt::Display for SentinelClient { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "[SentinelClient {}: {}]", self.inner.id, self.state()) - } -} - -#[doc(hidden)] -impl<'a> From<&'a Arc> for SentinelClient { - fn from(inner: &'a Arc) -> Self { - SentinelClient { inner: inner.clone() } - } -} - -#[doc(hidden)] -impl From for SentinelClient { - fn from(client: RedisClient) -> Self { - SentinelClient { inner: client.inner } - } -} - -impl SentinelClient { - /// Create a new client instance without connecting to the sentinel node. - pub fn new(config: SentinelConfig) -> SentinelClient { - SentinelClient { - inner: RedisClientInner::new(config.into()), - } - } - - /// The unique ID identifying this client and underlying connections. All connections will use the ID of the client that created them. - /// - /// The client will use [CLIENT SETNAME](https://redis.io/commands/client-setname) upon initializing a connection so client logs can be associated with server logs. - pub fn id(&self) -> &Arc { - &self.inner.id - } - - /// Read the config used to initialize the client. - pub fn client_config(&self) -> RedisConfig { - utils::read_locked(&self.inner.config) - } - - /// Read the reconnect policy used to initialize the client. - pub fn client_reconnect_policy(&self) -> Option { - self.inner.policy.read().clone() - } - - /// Whether or not the client has a reconnection policy. - pub fn has_reconnect_policy(&self) -> bool { - self.inner.policy.read().is_some() - } - - /// Read the state of the underlying connection. - pub fn state(&self) -> ClientState { - self.inner.state.read().clone() - } - - /// Connect to the sentinel node with an optional reconnection policy. - /// - /// This function returns a `JoinHandle` to a task that drives the connection. 
It will not resolve until the connection closes, and if a - /// reconnection policy with unlimited attempts is provided then the `JoinHandle` will run forever. - pub fn connect(&self, policy: Option) -> ConnectHandle { - let inner = self.inner.clone(); - - tokio::spawn(async move { - let result = multiplexer_commands::init(&inner, policy).await; - if let Err(ref e) = result { - multiplexer_utils::emit_connect_error(&inner, e); - } - utils::set_client_state(&inner.state, ClientState::Disconnected); - result - }) - } - - /// Wait for the client to connect to the sentinel node, or return an error if the initial connection cannot be established. - /// If the client is already connected this future will resolve immediately. - /// - /// This can be used with `on_reconnect` to separate initialization logic that needs to occur only on the first connection attempt vs subsequent attempts. - pub async fn wait_for_connect(&self) -> Result<(), RedisError> { - utils::wait_for_connect(&self.inner).await - } - - /// Listen for reconnection notifications. - /// - /// This function can be used to receive notifications whenever the client successfully reconnects. - /// - /// A reconnection event is also triggered upon first connecting to the server. - pub fn on_reconnect(&self) -> impl Stream { - let (tx, rx) = unbounded_channel(); - self.inner.reconnect_tx.write().push_back(tx); - - UnboundedReceiverStream::new(rx).map(|client| client.into()) - } - - /// Listen for protocol and connection errors. This stream can be used to more intelligently handle errors that may not appear in the request-response - /// cycle, and so cannot be handled by response futures. - pub fn on_error(&self) -> impl Stream { - let (tx, rx) = unbounded_channel(); - self.inner.error_tx.write().push_back(tx); - - UnboundedReceiverStream::new(rx) - } - - /// Listen for `(channel, message)` tuples on the publish-subscribe interface. 
- /// - /// If the connection to the Sentinel server closes for any reason this function does not need to be called again. Messages will start appearing on the original stream after [subscribe](Self::subscribe) is called again. - /// - /// - pub fn on_message(&self) -> impl Stream { - let (tx, rx) = unbounded_channel(); - self.inner.message_tx.write().push_back(tx); - - UnboundedReceiverStream::new(rx) - } - - /// Whether or not the client has an active connection to the server(s). - pub fn is_connected(&self) -> bool { - *self.inner.state.read() == ClientState::Connected - } - - /// Read the number of buffered commands that have not yet been sent to the server. - pub fn command_queue_len(&self) -> usize { - utils::read_atomic(&self.inner.cmd_buffer_len) - } - - /// Read latency metrics across all commands. - /// - /// This metric reflects the total latency experienced by callers, including time spent waiting in memory to be written and network latency. - /// Features such as automatic reconnect, `reconnect-on-auth-error`, and frame serialization time can all affect these values. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn read_latency_metrics(&self) -> Stats { - self.inner.latency_stats.read().read_metrics() - } - - /// Read and consume latency metrics, resetting their values afterwards. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn take_latency_metrics(&self) -> Stats { - self.inner.latency_stats.write().take_metrics() - } - - /// Read network latency metrics across all commands. - /// - /// This metric only reflects time spent waiting on a response. It will factor in reconnect time if a response doesn't arrive due to a connection - /// closing, but it does not factor in the time a command spends waiting to be written, serialization time, backpressure, etc. 
- #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn read_network_latency_metrics(&self) -> Stats { - self.inner.network_latency_stats.read().read_metrics() - } - - /// Read and consume network latency metrics, resetting their values afterwards. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn take_network_latency_metrics(&self) -> Stats { - self.inner.network_latency_stats.write().take_metrics() - } - - /// Read request payload size metrics across all commands. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn read_req_size_metrics(&self) -> Stats { - self.inner.req_size_stats.read().read_metrics() - } - - /// Read and consume request payload size metrics, resetting their values afterwards. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn take_req_size_metrics(&self) -> Stats { - self.inner.req_size_stats.write().take_metrics() - } - - /// Read response payload size metrics across all commands. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn read_res_size_metrics(&self) -> Stats { - self.inner.res_size_stats.read().read_metrics() - } - - /// Read and consume response payload size metrics, resetting their values afterwards. - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - pub fn take_res_size_metrics(&self) -> Stats { - self.inner.res_size_stats.write().take_metrics() - } - - /// Get the current value of a global Sentinel configuration parameter. The specified name may be a wildcard, similar to the Redis CONFIG GET command. - pub async fn config_get(&self, name: K) -> Result - where - R: RedisResponse, - K: Into, - { - commands::sentinel::config_get(&self.inner, name).await?.convert() - } - - /// Set the value of a global Sentinel configuration parameter. 
- pub async fn config_set(&self, name: K, value: V) -> Result - where - R: RedisResponse, - K: Into, - V: TryInto, - V::Error: Into, - { - commands::sentinel::config_set(&self.inner, name, to!(value)?) - .await? - .convert() - } - - /// Check if the current Sentinel configuration is able to reach the quorum needed to failover a master, and the majority needed to authorize the failover. - pub async fn ckquorum(&self, name: N) -> Result - where - R: RedisResponse, - N: Into, - { - commands::sentinel::ckquorum(&self.inner, name).await?.convert() - } - - /// Force Sentinel to rewrite its configuration on disk, including the current Sentinel state. - pub async fn flushconfig(&self) -> Result - where - R: RedisResponse, - { - commands::sentinel::flushconfig(&self.inner).await?.convert() - } - - /// Force a failover as if the master was not reachable, and without asking for agreement to other Sentinels. - pub async fn failover(&self, name: N) -> Result - where - R: RedisResponse, - N: Into, - { - commands::sentinel::failover(&self.inner, name).await?.convert() - } - - /// Return the ip and port number of the master with that name. - pub async fn get_master_addr_by_name(&self, name: N) -> Result - where - R: RedisResponse, - N: Into, - { - commands::sentinel::get_master_addr_by_name(&self.inner, name) - .await? - .convert() - } - - /// Return cached INFO output from masters and replicas. - pub async fn info_cache(&self) -> Result - where - R: RedisResponse, - { - commands::sentinel::info_cache(&self.inner).await?.convert() - } - - /// Show the state and info of the specified master. - pub async fn master(&self, name: N) -> Result - where - R: RedisResponse, - N: Into, - { - commands::sentinel::master(&self.inner, name).await?.convert() - } - - /// Show a list of monitored masters and their state. - pub async fn masters(&self) -> Result - where - R: RedisResponse, - { - commands::sentinel::masters(&self.inner).await?.convert() - } - - /// Start Sentinel's monitoring. 
- /// - /// - pub async fn monitor(&self, name: N, ip: IpAddr, port: u16, quorum: u32) -> Result - where - R: RedisResponse, - N: Into, - { - commands::sentinel::monitor(&self.inner, name, ip, port, quorum) - .await? - .convert() - } - - /// Return the ID of the Sentinel instance. - pub async fn myid(&self) -> Result - where - R: RedisResponse, - { - commands::sentinel::myid(&self.inner).await?.convert() - } - - /// This command returns information about pending scripts. - pub async fn pending_scripts(&self) -> Result - where - R: RedisResponse, - { - commands::sentinel::pending_scripts(&self.inner).await?.convert() - } - - /// Stop Sentinel's monitoring. - /// - /// - pub async fn remove(&self, name: N) -> Result - where - R: RedisResponse, - N: Into, - { - commands::sentinel::remove(&self.inner, name).await?.convert() - } - - /// Show a list of replicas for this master, and their state. - pub async fn replicas(&self, name: N) -> Result - where - R: RedisResponse, - N: Into, - { - commands::sentinel::replicas(&self.inner, name).await?.convert() - } - - /// Show a list of sentinel instances for this master, and their state. - pub async fn sentinels(&self, name: N) -> Result - where - R: RedisResponse, - N: Into, - { - commands::sentinel::sentinels(&self.inner, name).await?.convert() - } - - /// Set Sentinel's monitoring configuration. - /// - /// - pub async fn set(&self, name: N, args: V) -> Result - where - R: RedisResponse, - N: Into, - V: Into, - { - commands::sentinel::set(&self.inner, name, args.into()).await?.convert() - } - - /// This command simulates different Sentinel crash scenarios. - pub async fn simulate_failure(&self, kind: SentinelFailureKind) -> Result - where - R: RedisResponse, - { - commands::sentinel::simulate_failure(&self.inner, kind).await?.convert() - } - - /// This command will reset all the masters with matching name. 
- pub async fn reset(&self, pattern: P) -> Result - where - R: RedisResponse, - P: Into, - { - commands::sentinel::reset(&self.inner, pattern).await?.convert() - } - - /// Return the ID of the current connection. - /// - /// - pub async fn client_id(&self) -> Result - where - R: RedisResponse, - { - commands::client::client_id(&self.inner).await?.convert() - } - - /// The command returns information and statistics about the current client connection in a mostly human readable format. - /// - /// - pub async fn client_info(&self) -> Result - where - R: RedisResponse, - { - commands::client::client_info(&self.inner).await?.convert() - } - - /// Close a given connection or set of connections. - /// - /// - pub async fn client_kill(&self, filters: Vec) -> Result - where - R: RedisResponse, - { - commands::client::client_kill(&self.inner, filters).await?.convert() - } - - /// The CLIENT LIST command returns information and statistics about the client connections server in a mostly human readable format. - /// - /// - pub async fn client_list(&self, r#type: Option, ids: Option>) -> Result - where - R: RedisResponse, - I: Into, - { - commands::client::client_list(&self.inner, r#type, ids).await?.convert() - } - - /// The CLIENT GETNAME returns the name of the current connection as set by CLIENT SETNAME. - /// - /// - pub async fn client_getname(&self) -> Result - where - R: RedisResponse, - { - commands::client::client_getname(&self.inner).await?.convert() - } - - /// Assign a name to the current connection. - /// - /// **Note: The client automatically generates a unique name for each client that is shared by all underlying connections. 
- /// Use [Self::id] to read the automatically generated name.** - /// - /// - pub async fn client_setname(&self, name: S) -> Result<(), RedisError> - where - S: Into, - { - commands::client::client_setname(&self.inner, name).await - } - - /// CLIENT PAUSE is a connections control command able to suspend all the Redis clients for the specified amount of time (in milliseconds). - /// - /// - pub async fn client_pause(&self, timeout: i64, mode: Option) -> Result<(), RedisError> { - commands::client::client_pause(&self.inner, timeout, mode).await - } - - /// CLIENT UNPAUSE is used to resume command processing for all clients that were paused by CLIENT PAUSE. - /// - /// - pub async fn client_unpause(&self) -> Result<(), RedisError> { - commands::client::client_unpause(&self.inner).await - } - - /// When the sentinel is configured to use an ACL file (with the aclfile configuration option), this command will reload the - /// ACLs from the file, replacing all the current ACL rules with the ones defined in the file. - /// - /// - pub async fn acl_load(&self) -> Result<(), RedisError> { - commands::acl::acl_load(&self.inner).await - } - - /// When Redis is configured to use an ACL file (with the ACL file configuration option), this command will save the - /// currently defined ACLs from the server memory to the ACL file. - /// - /// - pub async fn acl_save(&self) -> Result<(), RedisError> { - commands::acl::acl_save(&self.inner).await - } - - /// The command shows the currently active ACL rules in the Redis server. - /// - /// - pub async fn acl_list(&self) -> Result - where - R: RedisResponse, - { - commands::acl::acl_list(&self.inner).await?.convert() - } - - /// The command shows a list of all the usernames of the currently configured users in the Redis ACL system. 
- /// - /// - pub async fn acl_users(&self) -> Result - where - R: RedisResponse, - { - commands::acl::acl_users(&self.inner).await?.convert() - } - - /// The command returns all the rules defined for an existing ACL user. - /// - /// - pub async fn acl_getuser(&self, username: S) -> Result, RedisError> - where - S: Into, - { - commands::acl::acl_getuser(&self.inner, username).await - } - - /// Create an ACL user with the specified rules or modify the rules of an existing user. - /// - /// - pub async fn acl_setuser(&self, username: S, rules: Vec) -> Result<(), RedisError> - where - S: Into, - { - commands::acl::acl_setuser(&self.inner, username, rules).await - } - - /// Delete all the specified ACL users and terminate all the connections that are authenticated with such users. - /// - /// - pub async fn acl_deluser(&self, usernames: S) -> Result - where - R: RedisResponse, - S: Into, - { - commands::acl::acl_deluser(&self.inner, usernames).await?.convert() - } - - /// The command shows the available ACL categories if called without arguments. If a category name is given, - /// the command shows all the Redis commands in the specified category. - /// - /// - pub async fn acl_cat(&self, category: Option) -> Result, RedisError> - where - S: Into, - { - commands::acl::acl_cat(&self.inner, category).await?.convert() - } - - /// Generate a password with length `bits`, returning the password. - pub async fn acl_genpass(&self, bits: Option) -> Result { - commands::acl::acl_genpass(&self.inner, bits).await?.convert() - } - - /// Return the username the current connection is authenticated with. New connections are authenticated - /// with the "default" user. - /// - /// - pub async fn acl_whoami(&self) -> Result { - commands::acl::acl_whoami(&self.inner).await?.convert() - } - - /// Read `count` recent ACL security events. 
- /// - /// - pub async fn acl_log_count(&self, count: Option) -> Result { - commands::acl::acl_log_count(&self.inner, count).await - } - - /// Clear the ACL security events logs. - /// - /// - pub async fn acl_log_reset(&self) -> Result<(), RedisError> { - commands::acl::acl_log_reset(&self.inner).await - } - - /// Request for authentication in a password-protected Sentinel server. Returns ok if successful. - /// - /// The client will automatically authenticate with the default user if a password is provided in the associated `SentinelConfig`. - /// - /// - pub async fn auth(&self, username: Option, password: S) -> Result<(), RedisError> - where - S: Into, - { - commands::server::auth(&self.inner, username, password).await - } - - /// Read info about the Sentinel server. - /// - /// - pub async fn info(&self, section: Option) -> Result - where - R: RedisResponse, - { - commands::server::info(&self.inner, section).await?.convert() - } - - /// Ping the Sentinel server. - /// - /// - pub async fn ping(&self) -> Result<(), RedisError> { - commands::server::ping(&self.inner).await?.convert() - } - - /// Shut down the server and quit the client. - /// - /// - pub async fn shutdown(&self, flags: Option) -> Result<(), RedisError> { - commands::server::shutdown(&self.inner, flags).await - } - - /// Close the connection to the Redis server. The returned future resolves when the command has been written to the socket, - /// not when the connection has been fully closed. Some time after this future resolves the future returned by [connect](Self::connect) - /// will resolve which indicates that the connection has been fully closed. - /// - /// This function will also close all error, pubsub message, and reconnection event streams. - pub async fn quit(&self) -> Result<(), RedisError> { - commands::server::quit(&self.inner).await - } - - /// Subscribe to a channel on the PubSub interface, returning the number of channels to which the client is subscribed. 
- /// - /// - pub async fn subscribe(&self, channel: S) -> Result - where - S: Into, - { - commands::pubsub::subscribe(&self.inner, channel).await - } - - /// Unsubscribe from a channel on the PubSub interface, returning the number of channels to which hte client is subscribed. - /// - /// - pub async fn unsubscribe(&self, channel: S) -> Result - where - S: Into, - { - commands::pubsub::unsubscribe(&self.inner, channel).await - } - - /// Subscribes the client to the given patterns. - /// - /// - pub async fn psubscribe(&self, patterns: S) -> Result, RedisError> - where - S: Into, - { - commands::pubsub::psubscribe(&self.inner, patterns).await - } - - /// Unsubscribes the client from the given patterns, or from all of them if none is given. - /// - /// - pub async fn punsubscribe(&self, patterns: S) -> Result, RedisError> - where - S: Into, - { - commands::pubsub::punsubscribe(&self.inner, patterns).await - } -} diff --git a/src/trace/README.md b/src/trace/README.md index 56d2b4c3..e8ffb6c0 100644 --- a/src/trace/README.md +++ b/src/trace/README.md @@ -34,7 +34,6 @@ This table shows the spans emitted by the client. The `Partial Trace` column des |------------------|---------------------------------------------------------------------| | client_id | The ID of the client instance (`client.id()`). | | cmd | The redis command name. | -| key | The first key used by the command. | | req_size | The size (in bytes) of the command's arguments. | | res_size | The size (in bytes) of the command's response. | | num_args | The number of arguments being sent to the server. 
| diff --git a/src/trace/disabled.rs b/src/trace/disabled.rs index 2f29b056..14327111 100644 --- a/src/trace/disabled.rs +++ b/src/trace/disabled.rs @@ -3,7 +3,7 @@ use crate::modules::inner::RedisClientInner; #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] use crate::protocol::types::RedisCommand; #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] -use redis_protocol::resp2::types::Frame; +use redis_protocol::resp3::types::Frame; #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] use std::sync::Arc; diff --git a/src/trace/enabled.rs b/src/trace/enabled.rs index 47c3560b..18a88750 100644 --- a/src/trace/enabled.rs +++ b/src/trace/enabled.rs @@ -2,7 +2,7 @@ use crate::modules::inner::RedisClientInner; use crate::protocol::types::RedisCommand; use crate::protocol::utils as protocol_utils; use crate::utils; -use redis_protocol::resp2::types::Frame; +use redis_protocol::resp3::types::Frame; use std::fmt; use std::sync::Arc; use tracing::event; @@ -55,7 +55,7 @@ pub fn set_network_span(command: &mut RedisCommand, flush: bool) { } pub fn record_response_size(span: &Span, frame: &Frame) { - span.record("res_size", &protocol_utils::frame_size(frame)); + span.record("res_size", &protocol_utils::resp3_frame_size(frame)); } pub fn create_command_span(inner: &Arc) -> Span { @@ -65,7 +65,6 @@ pub fn create_command_span(inner: &Arc) -> Span { module = "fred", client_id = inner.id.as_str(), cmd = Empty, - key = Empty, req_size = Empty, res_size = Empty ) @@ -100,7 +99,7 @@ pub fn create_pubsub_span(inner: &Arc, frame: &Frame) -> Span "parse_pubsub", module = "fred", client_id = &inner.id.as_str(), - res_size = &protocol_utils::frame_size(frame), + res_size = &protocol_utils::resp3_frame_size(frame), channel = Empty ) } diff --git a/src/types/acl.rs b/src/types/acl.rs new file mode 100644 index 00000000..72a807f1 --- /dev/null +++ b/src/types/acl.rs @@ -0,0 +1,132 @@ +use crate::types::RedisValue; + +/// ACL rules 
describing the keys a user can access. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum AclKeyPattern { + AllKeys, + Custom(String), +} + +impl AclKeyPattern { + pub(crate) fn to_value(&self) -> RedisValue { + match *self { + AclKeyPattern::AllKeys => RedisValue::from_static_str("allkeys"), + AclKeyPattern::Custom(ref pat) => format!("~{}", pat).into(), + } + } +} + +/// ACL rules describing the channels a user can access. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum AclChannelPattern { + AllChannels, + Custom(String), +} + +impl AclChannelPattern { + pub(crate) fn to_value(&self) -> RedisValue { + match *self { + AclChannelPattern::AllChannels => RedisValue::from_static_str("allchannels"), + AclChannelPattern::Custom(ref pat) => format!("&{}", pat).into(), + } + } +} + +/// ACL rules describing the commands a user can access. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum AclCommandPattern { + AllCommands, + NoCommands, + Custom { + command: String, + subcommand: Option, + }, +} + +impl AclCommandPattern { + pub(crate) fn to_value(&self, prefix: &'static str) -> RedisValue { + match *self { + AclCommandPattern::AllCommands => RedisValue::from_static_str("allcommands"), + AclCommandPattern::NoCommands => RedisValue::from_static_str("nocommands"), + AclCommandPattern::Custom { + ref command, + ref subcommand, + } => { + if let Some(subcommand) = subcommand { + format!("{}{}|{}", prefix, command, subcommand).into() + } else { + format!("{}{}", prefix, command).into() + } + } + } + } +} + +/// ACL rules associated with a user. 
+/// +/// +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum AclRule { + On, + Off, + Reset, + ResetChannels, + ResetKeys, + AddKeys(AclKeyPattern), + AddChannels(AclChannelPattern), + AddCommands(AclCommandPattern), + RemoveCommands(AclCommandPattern), + AddCategory(String), + RemoveCategory(String), + NoPass, + AddPassword(String), + AddHashedPassword(String), + RemovePassword(String), + RemoveHashedPassword(String), +} + +impl AclRule { + pub(crate) fn to_value(&self) -> RedisValue { + match self { + AclRule::On => RedisValue::from_static_str("on"), + AclRule::Off => RedisValue::from_static_str("off"), + AclRule::Reset => RedisValue::from_static_str("reset"), + AclRule::ResetChannels => RedisValue::from_static_str("resetchannels"), + AclRule::ResetKeys => RedisValue::from_static_str("resetkeys"), + AclRule::NoPass => RedisValue::from_static_str("nopass"), + AclRule::AddPassword(ref pass) => format!(">{}", pass).into(), + AclRule::RemovePassword(ref pass) => format!("<{}", pass).into(), + AclRule::AddHashedPassword(ref pass) => format!("#{}", pass).into(), + AclRule::RemoveHashedPassword(ref pass) => format!("!{}", pass).into(), + AclRule::AddCategory(ref cat) => format!("+@{}", cat).into(), + AclRule::RemoveCategory(ref cat) => format!("-@{}", cat).into(), + AclRule::AddKeys(ref pat) => pat.to_value(), + AclRule::AddChannels(ref pat) => pat.to_value(), + AclRule::AddCommands(ref pat) => pat.to_value("+"), + AclRule::RemoveCommands(ref pat) => pat.to_value("-"), + } + } +} + +/// A flag from the ACL GETUSER command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum AclUserFlag { + On, + Off, + AllKeys, + AllChannels, + AllCommands, + NoPass, +} + +/// An ACL user from the ACL GETUSER command. 
+/// +/// +#[derive(Clone, Debug, Eq, PartialEq, Default)] +pub struct AclUser { + pub flags: Vec, + pub passwords: Vec, + pub commands: Vec, + pub keys: Vec, + pub channels: Vec, +} diff --git a/src/types/args.rs b/src/types/args.rs new file mode 100644 index 00000000..3a833870 --- /dev/null +++ b/src/types/args.rs @@ -0,0 +1,1564 @@ +use crate::error::{RedisError, RedisErrorKind}; +use crate::interfaces::{ClientLike, Resp3Frame}; +use crate::protocol::connection::OK; +use crate::protocol::utils as protocol_utils; +use crate::types::{FromRedis, FromRedisKey, GeoPosition, XReadResponse, XReadValue, NIL, QUEUED}; +use crate::utils; +use bytes::Bytes; +use bytes_utils::Str; +use float_cmp::approx_eq; +use redis_protocol::resp2::types::NULL; +use std::borrow::Cow; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::{TryFrom, TryInto}; +use std::hash::{Hash, Hasher}; +use std::iter::FromIterator; +use std::ops::{Deref, DerefMut}; +use std::sync::Arc; +use std::{fmt, mem, str}; + +#[cfg(feature = "serde-json")] +use serde_json::Value; + +static_str!(TRUE_STR, "true"); +static_str!(FALSE_STR, "false"); + +macro_rules! impl_string_or_number( + ($t:ty) => { + impl From<$t> for StringOrNumber { + fn from(val: $t) -> Self { + StringOrNumber::Number(val as i64) + } + } + } +); + +macro_rules! impl_from_str_for_redis_key( + ($t:ty) => { + impl From<$t> for RedisKey { + fn from(val: $t) -> Self { + RedisKey { key: val.to_string().into() } + } + } + } +); + +/// An argument representing a string or number. 
+#[derive(Clone, Debug)] +pub enum StringOrNumber { + String(Str), + Number(i64), + Double(f64), +} + +impl PartialEq for StringOrNumber { + fn eq(&self, other: &Self) -> bool { + match *self { + StringOrNumber::String(ref s) => match *other { + StringOrNumber::String(ref _s) => s == _s, + _ => false, + }, + StringOrNumber::Number(ref i) => match *other { + StringOrNumber::Number(ref _i) => *i == *_i, + _ => false, + }, + StringOrNumber::Double(ref d) => match *other { + StringOrNumber::Double(ref _d) => utils::f64_eq(*d, *_d), + _ => false, + }, + } + } +} + +impl Eq for StringOrNumber {} + +impl StringOrNumber { + /// An optimized way to convert from `&'static str` that avoids copying or moving the underlying bytes. + pub fn from_static_str(s: &'static str) -> Self { + StringOrNumber::String(utils::static_str(s)) + } + + pub(crate) fn into_arg(self) -> RedisValue { + match self { + StringOrNumber::String(s) => RedisValue::String(s), + StringOrNumber::Number(n) => RedisValue::Integer(n), + StringOrNumber::Double(f) => RedisValue::Double(f), + } + } +} + +impl TryFrom for StringOrNumber { + type Error = RedisError; + + fn try_from(value: RedisValue) -> Result { + let val = match value { + RedisValue::String(s) => StringOrNumber::String(s), + RedisValue::Integer(i) => StringOrNumber::Number(i), + RedisValue::Double(f) => StringOrNumber::Double(f), + RedisValue::Bytes(b) => StringOrNumber::String(Str::from_inner(b)?), + _ => return Err(RedisError::new(RedisErrorKind::InvalidArgument, "")), + }; + + Ok(val) + } +} + +impl<'a> From<&'a str> for StringOrNumber { + fn from(s: &'a str) -> Self { + StringOrNumber::String(s.into()) + } +} + +impl From for StringOrNumber { + fn from(s: String) -> Self { + StringOrNumber::String(s.into()) + } +} + +impl From for StringOrNumber { + fn from(s: Str) -> Self { + StringOrNumber::String(s) + } +} + +impl_string_or_number!(i8); +impl_string_or_number!(i16); +impl_string_or_number!(i32); +impl_string_or_number!(i64); 
+impl_string_or_number!(isize); +impl_string_or_number!(u8); +impl_string_or_number!(u16); +impl_string_or_number!(u32); +impl_string_or_number!(u64); +impl_string_or_number!(usize); + +impl From for StringOrNumber { + fn from(f: f32) -> Self { + StringOrNumber::Double(f as f64) + } +} + +impl From for StringOrNumber { + fn from(f: f64) -> Self { + StringOrNumber::Double(f) + } +} + +/// A key in Redis. +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct RedisKey { + key: Bytes, +} + +impl RedisKey { + /// Create a new `RedisKey` from static bytes without copying. + pub fn from_static(b: &'static [u8]) -> Self { + RedisKey { + key: Bytes::from_static(b), + } + } + + /// Create a new `RedisKey` from a `&'static str` without copying. + pub fn from_static_str(b: &'static str) -> Self { + RedisKey { + key: Bytes::from_static(b.as_bytes()), + } + } + + /// Read the key as a str slice if it can be parsed as a UTF8 string. + pub fn as_str(&self) -> Option<&str> { + str::from_utf8(&self.key).ok() + } + + /// Read the key as a byte slice. + pub fn as_bytes(&self) -> &[u8] { + &self.key + } + + /// Read the inner `Bytes` struct. + pub fn inner(&self) -> &Bytes { + &self.key + } + + /// Read the key as a lossy UTF8 string with `String::from_utf8_lossy`. + pub fn as_str_lossy(&self) -> Cow { + String::from_utf8_lossy(&self.key) + } + + /// Convert the key to a UTF8 string, if possible. + pub fn into_string(self) -> Option { + String::from_utf8(self.key.to_vec()).ok() + } + + /// Read the inner bytes making up the key. + pub fn into_bytes(self) -> Bytes { + self.key + } + + /// Parse and return the key as a `Str` without copying the inner contents. + pub fn as_bytes_str(&self) -> Option { + Str::from_inner(self.key.clone()).ok() + } + + /// Hash the key to find the associated cluster [hash slot](https://redis.io/topics/cluster-spec#keys-distribution-model). 
+ pub fn cluster_hash(&self) -> u16 { + redis_protocol::redis_keyslot(&self.key) + } + + /// Read the `host:port` of the cluster node that owns the key if the client is clustered and the cluster state is known. + pub fn cluster_owner(&self, client: &C) -> Option> + where + C: ClientLike, + { + if utils::is_clustered(&client.inner().config) { + let hash_slot = self.cluster_hash(); + client + .inner() + .cluster_state + .read() + .as_ref() + .and_then(|state| state.get_server(hash_slot).map(|slot| slot.server.clone())) + } else { + None + } + } + + /// Replace this key with an empty byte array, returning the bytes from the original key. + pub fn take(&mut self) -> Bytes { + self.key.split_to(self.key.len()) + } + + /// Attempt to convert the key to any type that implements [FromRedisKey](crate::types::FromRedisKey). + /// + /// See the [RedisValue::convert](crate::types::RedisValue::convert) documentation for more information. + pub fn convert(self) -> Result + where + K: FromRedisKey, + { + K::from_key(self) + } +} + +impl TryFrom for RedisKey { + type Error = RedisError; + + fn try_from(value: RedisValue) -> Result { + let val = match value { + RedisValue::String(s) => RedisKey { key: s.into_inner() }, + RedisValue::Integer(i) => RedisKey { + key: i.to_string().into(), + }, + RedisValue::Double(f) => RedisKey { + key: f.to_string().into(), + }, + RedisValue::Bytes(b) => RedisKey { key: b }, + RedisValue::Boolean(b) => match b { + true => RedisKey { + key: TRUE_STR.clone().into_inner().into(), + }, + false => RedisKey { + key: FALSE_STR.clone().into_inner().into(), + }, + }, + RedisValue::Queued => utils::static_str(QUEUED).into(), + _ => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "Cannot convert to key.", + )) + } + }; + + Ok(val) + } +} + +impl From for RedisKey { + fn from(b: Bytes) -> Self { + RedisKey { key: b } + } +} + +impl<'a> From<&'a [u8]> for RedisKey { + fn from(b: &'a [u8]) -> Self { + RedisKey { key: b.to_vec().into() } + } 
+} + +/* +// doing this prevents MultipleKeys from being generic in its `From` implementations since the compiler cant know what to do with `Vec`. +impl From> for RedisKey { + fn from(b: Vec) -> Self { + RedisKey { key: b.into() } + } +} +*/ + +impl From for RedisKey { + fn from(s: String) -> Self { + RedisKey { key: s.into() } + } +} + +impl<'a> From<&'a str> for RedisKey { + fn from(s: &'a str) -> Self { + RedisKey { + key: s.as_bytes().to_vec().into(), + } + } +} + +impl<'a> From<&'a String> for RedisKey { + fn from(s: &'a String) -> Self { + RedisKey { key: s.clone().into() } + } +} + +impl From for RedisKey { + fn from(s: Str) -> Self { + RedisKey { key: s.into_inner() } + } +} + +impl<'a> From<&'a RedisKey> for RedisKey { + fn from(k: &'a RedisKey) -> RedisKey { + k.clone() + } +} + +impl From for RedisKey { + fn from(b: bool) -> Self { + match b { + true => RedisKey::from_static_str("true"), + false => RedisKey::from_static_str("false"), + } + } +} + +impl_from_str_for_redis_key!(u8); +impl_from_str_for_redis_key!(u16); +impl_from_str_for_redis_key!(u32); +impl_from_str_for_redis_key!(u64); +impl_from_str_for_redis_key!(u128); +impl_from_str_for_redis_key!(usize); +impl_from_str_for_redis_key!(i8); +impl_from_str_for_redis_key!(i16); +impl_from_str_for_redis_key!(i32); +impl_from_str_for_redis_key!(i64); +impl_from_str_for_redis_key!(i128); +impl_from_str_for_redis_key!(isize); +impl_from_str_for_redis_key!(f32); +impl_from_str_for_redis_key!(f64); + +#[cfg(feature = "serde-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde-json")))] +impl TryFrom for RedisKey { + type Error = RedisError; + + fn try_from(value: Value) -> Result { + let value: RedisKey = match value { + Value::String(s) => s.into(), + Value::Bool(b) => b.to_string().into(), + Value::Number(n) => n.to_string().into(), + _ => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "Cannot convert to key from JSON.", + )) + } + }; + + Ok(value) + } +} + +/// A map of `(RedisKey, 
RedisValue)` pairs. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct RedisMap { + pub(crate) inner: HashMap, +} + +impl RedisMap { + /// Create a new empty map. + pub fn new() -> Self { + RedisMap { inner: HashMap::new() } + } + + /// Replace the value an empty map, returning the original value. + pub fn take(&mut self) -> Self { + RedisMap { + inner: mem::replace(&mut self.inner, HashMap::new()), + } + } + + /// Read the number of (key, value) pairs in the map. + pub fn len(&self) -> usize { + self.inner.len() + } + + /// Take the inner `HashMap`. + pub fn inner(self) -> HashMap { + self.inner + } +} + +impl Deref for RedisMap { + type Target = HashMap; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for RedisMap { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl<'a> From<&'a RedisMap> for RedisMap { + fn from(vals: &'a RedisMap) -> Self { + vals.clone() + } +} + +impl TryFrom> for RedisMap +where + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, +{ + type Error = RedisError; + + fn try_from(value: HashMap) -> Result { + Ok(RedisMap { + inner: utils::into_redis_map(value.into_iter())?, + }) + } +} + +impl TryFrom> for RedisMap +where + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, +{ + type Error = RedisError; + + fn try_from(value: BTreeMap) -> Result { + Ok(RedisMap { + inner: utils::into_redis_map(value.into_iter())?, + }) + } +} + +impl TryFrom<(K, V)> for RedisMap +where + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, +{ + type Error = RedisError; + + fn try_from((key, value): (K, V)) -> Result { + let mut inner = HashMap::with_capacity(1); + inner.insert(to!(key)?, to!(value)?); + Ok(RedisMap { inner }) + } +} + +impl TryFrom> for RedisMap +where + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, +{ + type Error = RedisError; + + fn try_from(values: Vec<(K, V)>) -> Result { + let mut inner = HashMap::with_capacity(values.len()); + for 
(key, value) in values.into_iter() { + inner.insert(to!(key)?, to!(value)?); + } + Ok(RedisMap { inner }) + } +} + +impl TryFrom> for RedisMap +where + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, +{ + type Error = RedisError; + + fn try_from(values: VecDeque<(K, V)>) -> Result { + let mut inner = HashMap::with_capacity(values.len()); + for (key, value) in values.into_iter() { + inner.insert(to!(key)?, to!(value)?); + } + Ok(RedisMap { inner }) + } +} + +#[cfg(feature = "serde-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde-json")))] +impl TryFrom for RedisMap { + type Error = RedisError; + + fn try_from(value: Value) -> Result { + if let Value::Object(map) = value { + let mut inner = HashMap::with_capacity(map.len()); + for (key, value) in map.into_iter() { + let key: RedisKey = key.into(); + let value: RedisValue = value.try_into()?; + + inner.insert(key, value); + } + + Ok(RedisMap { inner }) + } else { + Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "Cannot convert non-object JSON value to map.", + )) + } + } +} + +/// The kind of value from Redis. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum RedisValueKind { + Boolean, + Integer, + Double, + String, + Bytes, + Null, + Queued, + Map, + Array, +} + +impl fmt::Display for RedisValueKind { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let s = match *self { + RedisValueKind::Boolean => "Boolean", + RedisValueKind::Integer => "Integer", + RedisValueKind::Double => "Double", + RedisValueKind::String => "String", + RedisValueKind::Bytes => "Bytes", + RedisValueKind::Null => "nil", + RedisValueKind::Queued => "Queued", + RedisValueKind::Map => "Map", + RedisValueKind::Array => "Array", + }; + + write!(f, "{}", s) + } +} + +/// A value used in a Redis command. +#[derive(Clone, Debug)] +pub enum RedisValue { + /// A boolean value. + Boolean(bool), + /// An integer value. + Integer(i64), + /// A double floating point number. + Double(f64), + /// A string value. 
+ String(Str), + /// A value to represent non-UTF8 strings or byte arrays. + Bytes(Bytes), + /// A `nil` value. + Null, + /// A special value used to indicate a MULTI block command was received by the server. + Queued, + /// A map of key/value pairs, primarily used in RESP3 mode. + Map(RedisMap), + /// An ordered list of values. + /// + /// In RESP2 mode the server may send map structures as an array of key/value pairs. + Array(Vec), +} + +impl PartialEq for RedisValue { + fn eq(&self, other: &Self) -> bool { + use RedisValue::*; + + match self { + Boolean(ref s) => match other { + Boolean(ref o) => *s == *o, + _ => false, + }, + Integer(ref s) => match other { + Integer(ref o) => *s == *o, + _ => false, + }, + Double(ref s) => match other { + Double(ref o) => approx_eq!(f64, *s, *o, ulps = 2), + _ => false, + }, + String(ref s) => match other { + String(ref o) => s == o, + _ => false, + }, + Bytes(ref s) => match other { + Bytes(ref o) => s == o, + _ => false, + }, + Null => match other { + Null => true, + _ => false, + }, + Queued => match other { + Queued => true, + _ => false, + }, + Map(ref s) => match other { + Map(ref o) => s == o, + _ => false, + }, + Array(ref s) => match other { + Array(ref o) => s == o, + _ => false, + }, + } + } +} + +impl Eq for RedisValue {} + +impl<'a> RedisValue { + /// Create a new `RedisValue::Bytes` from a static byte slice without copying. + pub fn from_static(b: &'static [u8]) -> Self { + RedisValue::Bytes(Bytes::from_static(b)) + } + + /// Create a new `RedisValue::String` from a static `str` without copying. + pub fn from_static_str(s: &'static str) -> Self { + RedisValue::String(utils::static_str(s)) + } + + /// Create a new `RedisValue` with the `OK` status. + pub fn new_ok() -> Self { + Self::from_static_str(OK) + } + + /// Whether or not the value is a simple string OK value. 
+ pub fn is_ok(&self) -> bool { + match *self { + RedisValue::String(ref s) => *s == OK, + _ => false, + } + } + + /// Attempt to convert the value into an integer, returning the original string as an error if the parsing fails. + pub fn into_integer(self) -> Result { + match self { + RedisValue::String(s) => match s.parse::() { + Ok(i) => Ok(RedisValue::Integer(i)), + Err(_) => Err(RedisValue::String(s)), + }, + RedisValue::Integer(i) => Ok(RedisValue::Integer(i)), + _ => Err(self), + } + } + + /// Read the type of the value without any associated data. + pub fn kind(&self) -> RedisValueKind { + match *self { + RedisValue::Boolean(_) => RedisValueKind::Boolean, + RedisValue::Integer(_) => RedisValueKind::Integer, + RedisValue::Double(_) => RedisValueKind::Double, + RedisValue::String(_) => RedisValueKind::String, + RedisValue::Bytes(_) => RedisValueKind::Bytes, + RedisValue::Null => RedisValueKind::Null, + RedisValue::Queued => RedisValueKind::Queued, + RedisValue::Map(_) => RedisValueKind::Map, + RedisValue::Array(_) => RedisValueKind::Array, + } + } + + /// Check if the value is null. + pub fn is_null(&self) -> bool { + match *self { + RedisValue::Null => true, + _ => false, + } + } + + /// Check if the value is an integer. + pub fn is_integer(&self) -> bool { + match *self { + RedisValue::Integer(_) => true, + _ => false, + } + } + + /// Check if the value is a string. + pub fn is_string(&self) -> bool { + match *self { + RedisValue::String(_) => true, + _ => false, + } + } + + /// Check if the value is an array of bytes. + pub fn is_bytes(&self) -> bool { + match *self { + RedisValue::Bytes(_) => true, + _ => false, + } + } + + /// Whether or not the value is a boolean value or can be parsed as a boolean value. 
+ pub fn is_boolean(&self) -> bool { + match *self { + RedisValue::Boolean(_) => true, + RedisValue::Integer(i) => match i { + 0 | 1 => true, + _ => false, + }, + RedisValue::String(ref s) => match s.as_bytes() { + b"true" | b"false" | b"t" | b"f" | b"TRUE" | b"FALSE" | b"T" | b"F" | b"1" | b"0" => true, + _ => false, + }, + _ => false, + } + } + + /// Whether or not the inner value is a double or can be parsed as a double. + pub fn is_double(&self) -> bool { + match *self { + RedisValue::Double(_) => true, + RedisValue::String(ref s) => utils::redis_string_to_f64(s).is_ok(), + _ => false, + } + } + + /// Check if the value is a `QUEUED` response. + pub fn is_queued(&self) -> bool { + match *self { + RedisValue::Queued => true, + _ => false, + } + } + + /// Whether or not the value is an array or map. + pub fn is_aggregate_type(&self) -> bool { + match *self { + RedisValue::Array(_) | RedisValue::Map(_) => true, + _ => false, + } + } + + /// Whether or not the value is a `RedisMap`. + /// + /// See [is_maybe_map](Self::is_maybe_map) for a function that also checks for arrays that likely represent a map in RESP2 mode. + pub fn is_map(&self) -> bool { + match *self { + RedisValue::Map(_) => true, + _ => false, + } + } + + /// Whether or not the value is a `RedisMap` or an array with an even number of elements where each even-numbered element is not an aggregate type. + /// + /// RESP2 and RESP3 encode maps differently, and this function can be used to duck-type maps across protocol versions. + pub fn is_maybe_map(&self) -> bool { + match *self { + RedisValue::Map(_) => true, + RedisValue::Array(ref arr) => utils::is_maybe_array_map(arr), + _ => false, + } + } + + /// Whether or not the value is an array. + pub fn is_array(&self) -> bool { + match *self { + RedisValue::Array(_) => true, + _ => false, + } + } + + /// Read and return the inner value as a `u64`, if possible. 
+ pub fn as_u64(&self) -> Option { + match self { + RedisValue::Integer(ref i) => { + if *i >= 0 { + Some(*i as u64) + } else { + None + } + } + RedisValue::String(ref s) => s.parse::().ok(), + RedisValue::Array(ref inner) => { + if inner.len() == 1 { + inner.first().and_then(|v| v.as_u64()) + } else { + None + } + } + _ => None, + } + } + + /// Read and return the inner value as a `i64`, if possible. + pub fn as_i64(&self) -> Option { + match self { + RedisValue::Integer(ref i) => Some(*i), + RedisValue::String(ref s) => s.parse::().ok(), + RedisValue::Array(ref inner) => { + if inner.len() == 1 { + inner.first().and_then(|v| v.as_i64()) + } else { + None + } + } + _ => None, + } + } + + /// Read and return the inner value as a `usize`, if possible. + pub fn as_usize(&self) -> Option { + match self { + RedisValue::Integer(i) => { + if *i >= 0 { + Some(*i as usize) + } else { + None + } + } + RedisValue::String(ref s) => s.parse::().ok(), + RedisValue::Array(ref inner) => { + if inner.len() == 1 { + inner.first().and_then(|v| v.as_usize()) + } else { + None + } + } + _ => None, + } + } + + /// Read and return the inner value as a `f64`, if possible. + pub fn as_f64(&self) -> Option { + match self { + RedisValue::Double(ref f) => Some(*f), + RedisValue::String(ref s) => utils::redis_string_to_f64(s).ok(), + RedisValue::Integer(ref i) => Some(*i as f64), + RedisValue::Array(ref inner) => { + if inner.len() == 1 { + inner.first().and_then(|v| v.as_f64()) + } else { + None + } + } + _ => None, + } + } + + /// Read and return the inner `String` if the value is a string or scalar value. 
+ pub fn into_string(self) -> Option { + match self { + RedisValue::Boolean(b) => Some(b.to_string()), + RedisValue::Double(f) => Some(f.to_string()), + RedisValue::String(s) => Some(s.to_string()), + RedisValue::Bytes(b) => String::from_utf8(b.to_vec()).ok(), + RedisValue::Integer(i) => Some(i.to_string()), + RedisValue::Queued => Some(QUEUED.to_owned()), + RedisValue::Array(mut inner) => { + if inner.len() == 1 { + inner.pop().and_then(|v| v.into_string()) + } else { + None + } + } + _ => None, + } + } + + /// Read and return the inner data as a `Str` from the `bytes` crate. + pub fn into_bytes_str(self) -> Option { + match self { + RedisValue::Boolean(b) => match b { + true => Some(TRUE_STR.clone()), + false => Some(FALSE_STR.clone()), + }, + RedisValue::Double(f) => Some(f.to_string().into()), + RedisValue::String(s) => Some(s), + RedisValue::Bytes(b) => Str::from_inner(b).ok(), + RedisValue::Integer(i) => Some(i.to_string().into()), + RedisValue::Queued => Some(utils::static_str(QUEUED)), + RedisValue::Array(mut inner) => { + if inner.len() == 1 { + inner.pop().and_then(|v| v.into_bytes_str()) + } else { + None + } + } + _ => None, + } + } + + /// Read the inner value as a `Str`. + pub fn as_bytes_str(&self) -> Option { + match self { + RedisValue::Boolean(ref b) => match *b { + true => Some(TRUE_STR.clone()), + false => Some(FALSE_STR.clone()), + }, + RedisValue::Double(ref f) => Some(f.to_string().into()), + RedisValue::String(ref s) => Some(s.clone()), + RedisValue::Bytes(ref b) => Str::from_inner(b.clone()).ok(), + RedisValue::Integer(ref i) => Some(i.to_string().into()), + RedisValue::Queued => Some(utils::static_str(QUEUED)), + RedisValue::Array(ref inner) => { + if inner.len() == 1 { + inner[0].as_bytes_str() + } else { + None + } + } + _ => None, + } + } + + /// Read and return the inner `String` if the value is a string or scalar value. + /// + /// Note: this will cast integers and doubles to strings. 
+ pub fn as_string(&self) -> Option { + match self { + RedisValue::Boolean(ref b) => Some(b.to_string()), + RedisValue::Double(ref f) => Some(f.to_string()), + RedisValue::String(ref s) => Some(s.to_string()), + RedisValue::Bytes(ref b) => str::from_utf8(b).ok().map(|s| s.to_owned()), + RedisValue::Integer(ref i) => Some(i.to_string()), + RedisValue::Queued => Some(QUEUED.to_owned()), + _ => None, + } + } + + /// Read the inner value as a string slice. + /// + /// Null is returned as `"nil"` and scalar values are cast to a string. + pub fn as_str(&self) -> Option> { + let s: Cow = match *self { + RedisValue::Double(ref f) => Cow::Owned(f.to_string()), + RedisValue::Boolean(ref b) => Cow::Owned(b.to_string()), + RedisValue::String(ref s) => Cow::Borrowed(s.deref().as_ref()), + RedisValue::Integer(ref i) => Cow::Owned(i.to_string()), + RedisValue::Null => Cow::Borrowed(NIL), + RedisValue::Queued => Cow::Borrowed(QUEUED), + RedisValue::Bytes(ref b) => return str::from_utf8(b).ok().map(|s| Cow::Borrowed(s)), + _ => return None, + }; + + Some(s) + } + + /// Read the inner value as a string, using `String::from_utf8_lossy` on byte slices. + pub fn as_str_lossy(&self) -> Option> { + let s: Cow = match *self { + RedisValue::Boolean(ref b) => Cow::Owned(b.to_string()), + RedisValue::Double(ref f) => Cow::Owned(f.to_string()), + RedisValue::String(ref s) => Cow::Borrowed(s.deref().as_ref()), + RedisValue::Integer(ref i) => Cow::Owned(i.to_string()), + RedisValue::Null => Cow::Borrowed(NIL), + RedisValue::Queued => Cow::Borrowed(QUEUED), + RedisValue::Bytes(ref b) => String::from_utf8_lossy(b), + _ => return None, + }; + + Some(s) + } + + /// Read the inner value as an array of bytes, if possible. + pub fn as_bytes(&self) -> Option<&[u8]> { + match *self { + RedisValue::String(ref s) => Some(s.as_bytes()), + RedisValue::Bytes(ref b) => Some(b), + RedisValue::Queued => Some(QUEUED.as_bytes()), + _ => None, + } + } + + /// Attempt to convert the value to a `bool`. 
+ pub fn as_bool(&self) -> Option { + match *self { + RedisValue::Boolean(b) => Some(b), + RedisValue::Integer(ref i) => match *i { + 0 => Some(false), + 1 => Some(true), + _ => None, + }, + RedisValue::String(ref s) => match s.as_bytes() { + b"true" | b"TRUE" | b"t" | b"T" | b"1" => Some(true), + b"false" | b"FALSE" | b"f" | b"F" | b"0" => Some(false), + _ => None, + }, + RedisValue::Null => Some(false), + RedisValue::Array(ref inner) => { + if inner.len() == 1 { + inner.first().and_then(|v| v.as_bool()) + } else { + None + } + } + _ => None, + } + } + + /// Convert the value to an array of `(value, score)` tuples if the redis value is an array result from a sorted set command with scores. + pub fn into_zset_result(self) -> Result, RedisError> { + protocol_utils::value_to_zset_result(self) + } + + /// Attempt to convert this value to a Redis map if it's an array with an even number of elements. + pub fn into_map(self) -> Result { + if let RedisValue::Map(map) = self { + return Ok(map); + } + + if let RedisValue::Array(mut values) = self { + if values.len() % 2 != 0 { + return Err(RedisError::new( + RedisErrorKind::Unknown, + "Expected an even number of elements.", + )); + } + let mut inner = HashMap::with_capacity(values.len() / 2); + while values.len() >= 2 { + let value = values.pop().unwrap(); + let key: RedisKey = values.pop().unwrap().try_into()?; + + inner.insert(key, value); + } + + Ok(RedisMap { inner }) + } else { + Err(RedisError::new(RedisErrorKind::Unknown, "Expected array.")) + } + } + + /// Convert the array value to a set, if possible. + pub fn into_set(self) -> Result, RedisError> { + if let RedisValue::Array(values) = self { + let mut out = HashSet::with_capacity(values.len()); + + for value in values.into_iter() { + out.insert(value); + } + Ok(out) + } else { + Err(RedisError::new(RedisErrorKind::Unknown, "Expected array.")) + } + } + + /// Convert this value to an array if it's an array or map. 
+ /// + /// If the value is not an array or map this returns a single-element array containing the current value. + pub fn into_array(self) -> Vec { + match self { + RedisValue::Array(values) => values, + RedisValue::Map(map) => { + let mut out = Vec::with_capacity(map.len() * 2); + + for (key, value) in map.inner().into_iter() { + out.push(key.into()); + out.push(value); + } + out + } + _ => vec![self], + } + } + + /// Convert the value to an array of bytes, if possible. + pub fn into_owned_bytes(self) -> Option> { + let v = match self { + RedisValue::String(s) => s.to_string().into_bytes(), + RedisValue::Bytes(b) => b.to_vec(), + RedisValue::Null => NULL.as_bytes().to_vec(), + RedisValue::Queued => QUEUED.as_bytes().to_vec(), + RedisValue::Array(mut inner) => { + if inner.len() == 1 { + return inner.pop().and_then(|v| v.into_owned_bytes()); + } else { + return None; + } + } + RedisValue::Integer(i) => i.to_string().into_bytes(), + _ => return None, + }; + + Some(v) + } + + /// Convert the value into a `Bytes` view. + pub fn into_bytes(self) -> Option { + let v = match self { + RedisValue::String(s) => s.inner().clone(), + RedisValue::Bytes(b) => b, + RedisValue::Null => Bytes::from_static(NULL.as_bytes()), + RedisValue::Queued => Bytes::from_static(QUEUED.as_bytes()), + RedisValue::Array(mut inner) => { + if inner.len() == 1 { + return inner.pop().and_then(|v| v.into_bytes()); + } else { + return None; + } + } + RedisValue::Integer(i) => i.to_string().into(), + _ => return None, + }; + + Some(v) + } + + /// Return the length of the inner array if the value is an array. + pub fn array_len(&self) -> Option { + match self { + RedisValue::Array(ref a) => Some(a.len()), + _ => None, + } + } + + /// Flatten adjacent nested arrays to the provided depth. + /// + /// See the [XREAD](crate::interfaces::StreamsInterface::xread) documentation for an example of when this might be useful. 
+ pub fn flatten_array_values(self, depth: usize) -> Self { + utils::flatten_nested_array_values(self, depth) + } + + /// A utility function to convert the response from `XREAD` or `XREADGROUP` into a type with a less verbose type declaration. + /// + /// This function supports responses in both RESP2 and RESP3 formats. + /// + /// See the [XREAD](crate::interfaces::StreamsInterface::xread) (or `XREADGROUP`) documentation for more information. + pub fn into_xread_response(self) -> Result, RedisError> + where + K1: FromRedisKey + Hash + Eq, + K2: FromRedisKey + Hash + Eq, + I: FromRedis, + V: FromRedis, + { + self.flatten_array_values(2).convert() + } + + /// A utility function to convert the response from `XCLAIM`, etc into a type with a less verbose type declaration. + /// + /// This function supports responses in both RESP2 and RESP3 formats. + pub fn into_xread_value(self) -> Result>, RedisError> + where + K: FromRedisKey + Hash + Eq, + I: FromRedis, + V: FromRedis, + { + self.flatten_array_values(1).convert() + } + + /// A utility function to convert the response from `XAUTOCLAIM` into a type with a less verbose type declaration. + /// + /// This function supports responses in both RESP2 and RESP3 formats. + // FIXME: this function also needs changes to support the Redis v7 format. + pub fn into_xautoclaim_values(self) -> Result<(String, Vec>), RedisError> + where + K: FromRedisKey + Hash + Eq, + I: FromRedis, + V: FromRedis, + { + if let RedisValue::Array(mut values) = self { + if values.len() != 2 { + warn!("Invalid XAUTOCLAIM response. 
If you're using Redis 7.x you may need to use xautoclaim instead of xautoclaim_values."); + Err(RedisError::new_parse("Expected 2-element array response.")) + } else { + // unwrap checked above + let entries = values.pop().unwrap(); + let cursor: String = values.pop().unwrap().convert()?; + + Ok((cursor, entries.flatten_array_values(1).convert()?)) + } + } else { + Err(RedisError::new_parse("Expected array response.")) + } + } + + /// Convert the value into a `GeoPosition`, if possible. + /// + /// Null values are returned as `None` to work more easily with the result of the `GEOPOS` command. + pub fn as_geo_position(&self) -> Result, RedisError> { + utils::value_to_geo_pos(self) + } + + /// Replace this value with `RedisValue::Null`, returning the original value. + pub fn take(&mut self) -> RedisValue { + mem::replace(self, RedisValue::Null) + } + + /// Attempt to convert this value to any value that implements the [FromRedis](crate::types::FromRedis) trait. + /// + /// ```rust + /// # use fred::types::RedisValue; + /// # use std::collections::HashMap; + /// let foo: usize = RedisValue::String("123".into()).convert()?; + /// let foo: i64 = RedisValue::String("123".into()).convert()?; + /// let foo: String = RedisValue::String("123".into()).convert()?; + /// let foo: Vec = RedisValue::Bytes(vec![102, 111, 111].into()).convert()?; + /// let foo: Vec = RedisValue::String("foo".into()).convert()?; + /// let foo: Vec = RedisValue::Array(vec!["a".into(), "b".into()]).convert()?; + /// let foo: HashMap = RedisValue::Array(vec![ + /// "a".into(), 1.into(), + /// "b".into(), 2.into() + /// ]) + /// .convert()?; + /// let foo: (String, i64) = RedisValue::Array(vec!["a".into(), 1.into()]).convert()?; + /// let foo: Vec<(String, i64)> = RedisValue::Array(vec![ + /// "a".into(), 1.into(), + /// "b".into(), 2.into() + /// ]) + /// .convert()?; + /// // ... 
+ /// ``` + /// **Performance Considerations** + /// + /// The backing data type for potentially large values is either [Str](https://docs.rs/bytes-utils/latest/bytes_utils/string/type.Str.html) or [Bytes](https://docs.rs/bytes/latest/bytes/struct.Bytes.html). + /// + /// These values represent views into the buffer that receives data from the Redis server. As a result it is possible for callers to utilize `RedisValue` types in such a way that the underlying data is never moved or copied. + /// + /// If performance is a concern and callers do not need to modify the underlying data it is recommended that callers convert to `Str` or `Bytes` whenever possible. If callers do not want to take a dependency on the `Bytes` ecosystem types, or the values need to be mutated, then callers should use other types such as `String`, `Vec`, etc. It should be noted however that conversion to these other types will result in at least a move, if not a copy, of the underlying data. + pub fn convert(self) -> Result + where + R: FromRedis, + { + R::from_value(self) + } + + /// Whether or not the value can be hashed. + /// + /// Some use cases require using `RedisValue` types as keys in a `HashMap`, etc. Trying to do so with an aggregate type can panic, + /// and this function can be used to more gracefully handle this situation. + pub fn can_hash(&self) -> bool { + match self.kind() { + RedisValueKind::String + | RedisValueKind::Boolean + | RedisValueKind::Double + | RedisValueKind::Integer + | RedisValueKind::Bytes + | RedisValueKind::Null + | RedisValueKind::Array + | RedisValueKind::Queued => true, + _ => false, + } + } + + /// Convert the value to JSON. 
+ #[cfg(feature = "serde-json")] + #[cfg_attr(docsrs, doc(cfg(feature = "serde-json")))] + pub fn into_json(self) -> Result { + Value::from_value(self) + } +} + +impl Hash for RedisValue { + fn hash(&self, state: &mut H) { + // used to prevent collisions between different types + let prefix = match self.kind() { + RedisValueKind::Boolean => b'B', + RedisValueKind::Double => b'd', + RedisValueKind::Integer => b'i', + RedisValueKind::String => b's', + RedisValueKind::Null => b'n', + RedisValueKind::Queued => b'q', + RedisValueKind::Array => b'a', + RedisValueKind::Map => b'm', + RedisValueKind::Bytes => b'b', + }; + prefix.hash(state); + + match *self { + RedisValue::Boolean(b) => b.hash(state), + RedisValue::Double(f) => f.to_be_bytes().hash(state), + RedisValue::Integer(d) => d.hash(state), + RedisValue::String(ref s) => s.hash(state), + RedisValue::Bytes(ref b) => b.hash(state), + RedisValue::Null => NULL.hash(state), + RedisValue::Queued => QUEUED.hash(state), + RedisValue::Array(ref arr) => { + for value in arr.iter() { + value.hash(state); + } + } + _ => panic!("Cannot hash aggregate value."), + } + } +} + +impl From for RedisValue { + fn from(d: u8) -> Self { + RedisValue::Integer(d as i64) + } +} + +impl From for RedisValue { + fn from(d: u16) -> Self { + RedisValue::Integer(d as i64) + } +} + +impl From for RedisValue { + fn from(d: u32) -> Self { + RedisValue::Integer(d as i64) + } +} + +impl From for RedisValue { + fn from(d: i8) -> Self { + RedisValue::Integer(d as i64) + } +} + +impl From for RedisValue { + fn from(d: i16) -> Self { + RedisValue::Integer(d as i64) + } +} + +impl From for RedisValue { + fn from(d: i32) -> Self { + RedisValue::Integer(d as i64) + } +} + +impl From for RedisValue { + fn from(d: i64) -> Self { + RedisValue::Integer(d) + } +} + +impl From for RedisValue { + fn from(f: f32) -> Self { + RedisValue::Double(f as f64) + } +} + +impl From for RedisValue { + fn from(f: f64) -> Self { + RedisValue::Double(f) + } +} + +impl TryFrom 
for RedisValue { + type Error = RedisError; + + fn try_from(d: u64) -> Result { + if d >= (i64::MAX as u64) { + return Err(RedisError::new(RedisErrorKind::Unknown, "Unsigned integer too large.")); + } + + Ok((d as i64).into()) + } +} + +impl TryFrom for RedisValue { + type Error = RedisError; + + fn try_from(d: u128) -> Result { + if d >= (i64::MAX as u128) { + return Err(RedisError::new(RedisErrorKind::Unknown, "Unsigned integer too large.")); + } + + Ok((d as i64).into()) + } +} + +impl TryFrom for RedisValue { + type Error = RedisError; + + fn try_from(d: i128) -> Result { + if d >= (i64::MAX as i128) { + return Err(RedisError::new(RedisErrorKind::Unknown, "Signed integer too large.")); + } + + Ok((d as i64).into()) + } +} + +impl TryFrom for RedisValue { + type Error = RedisError; + + fn try_from(d: usize) -> Result { + if d >= (i64::MAX as usize) { + return Err(RedisError::new(RedisErrorKind::Unknown, "Unsigned integer too large.")); + } + + Ok((d as i64).into()) + } +} + +impl From for RedisValue { + fn from(s: Str) -> Self { + RedisValue::String(s) + } +} + +impl From for RedisValue { + fn from(b: Bytes) -> Self { + RedisValue::Bytes(b) + } +} + +impl From for RedisValue { + fn from(d: String) -> Self { + RedisValue::String(Str::from(d)) + } +} + +impl<'a> From<&'a String> for RedisValue { + fn from(d: &'a String) -> Self { + RedisValue::String(Str::from(d)) + } +} + +impl<'a> From<&'a str> for RedisValue { + fn from(d: &'a str) -> Self { + RedisValue::String(Str::from(d)) + } +} + +impl<'a> From<&'a [u8]> for RedisValue { + fn from(b: &'a [u8]) -> Self { + RedisValue::Bytes(Bytes::from(b.to_vec())) + } +} + +impl From for RedisValue { + fn from(d: bool) -> Self { + RedisValue::Boolean(d) + } +} + +impl TryFrom> for RedisValue +where + T: TryInto, + T::Error: Into, +{ + type Error = RedisError; + + fn try_from(d: Option) -> Result { + match d { + Some(i) => to!(i), + None => Ok(RedisValue::Null), + } + } +} + +impl FromIterator for RedisValue { + fn 
from_iter>(iter: I) -> Self { + RedisValue::Array(iter.into_iter().collect()) + } +} + +impl TryFrom> for RedisValue +where + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, +{ + type Error = RedisError; + + fn try_from(d: HashMap) -> Result { + Ok(RedisValue::Map(RedisMap { + inner: utils::into_redis_map(d.into_iter())?, + })) + } +} + +impl TryFrom> for RedisValue +where + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, +{ + type Error = RedisError; + + fn try_from(d: BTreeMap) -> Result { + Ok(RedisValue::Map(RedisMap { + inner: utils::into_redis_map(d.into_iter())?, + })) + } +} + +impl From for RedisValue { + fn from(d: RedisKey) -> Self { + RedisValue::Bytes(d.key) + } +} + +impl From for RedisValue { + fn from(m: RedisMap) -> Self { + RedisValue::Map(m) + } +} + +impl From<()> for RedisValue { + fn from(_: ()) -> Self { + RedisValue::Null + } +} + +#[cfg(feature = "serde-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde-json")))] +impl TryFrom for RedisValue { + type Error = RedisError; + + fn try_from(v: Value) -> Result { + let value = match v { + Value::Null => RedisValue::Null, + Value::String(s) => RedisValue::String(s.into()), + Value::Bool(b) => RedisValue::Boolean(b), + Value::Number(n) => { + if n.is_i64() { + RedisValue::Integer(n.as_i64().unwrap()) + } else if n.is_f64() { + RedisValue::Double(n.as_f64().unwrap()) + } else { + return Err(RedisError::new(RedisErrorKind::InvalidArgument, "Invalid JSON number.")); + } + } + Value::Array(a) => { + let mut out = Vec::with_capacity(a.len()); + for value in a.into_iter() { + out.push(value.try_into()?); + } + RedisValue::Array(out) + } + Value::Object(m) => { + let mut out: HashMap = HashMap::with_capacity(m.len()); + for (key, value) in m.into_iter() { + out.insert(key.into(), value.try_into()?); + } + RedisValue::Map(RedisMap { inner: out }) + } + }; + + Ok(value) + } +} + +impl TryFrom for RedisValue { + type Error = RedisError; + + fn try_from(value: Resp3Frame) -> 
Result { + protocol_utils::frame_to_results(value) + } +} diff --git a/src/types/client.rs b/src/types/client.rs new file mode 100644 index 00000000..07f9e95e --- /dev/null +++ b/src/types/client.rs @@ -0,0 +1,112 @@ +use crate::utils; +use bytes_utils::Str; + +/// The type of clients to close. +/// +/// +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClientKillType { + Normal, + Master, + Replica, + Pubsub, +} + +impl ClientKillType { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ClientKillType::Normal => "normal", + ClientKillType::Master => "master", + ClientKillType::Replica => "replica", + ClientKillType::Pubsub => "pubsub", + }) + } +} + +/// Filters provided to the CLIENT KILL command. +/// +/// +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClientKillFilter { + ID(String), + Type(ClientKillType), + User(String), + Addr(String), + LAddr(String), + SkipMe(bool), +} + +impl ClientKillFilter { + pub(crate) fn to_str(&self) -> (Str, Str) { + let (prefix, value) = match *self { + ClientKillFilter::ID(ref id) => ("ID", id.into()), + ClientKillFilter::Type(ref kind) => ("TYPE", kind.to_str()), + ClientKillFilter::User(ref user) => ("USER", user.into()), + ClientKillFilter::Addr(ref addr) => ("ADDR", addr.into()), + ClientKillFilter::LAddr(ref addr) => ("LADDR", addr.into()), + ClientKillFilter::SkipMe(ref b) => ( + "SKIPME", + match *b { + true => utils::static_str("yes"), + false => utils::static_str("no"), + }, + ), + }; + + (utils::static_str(prefix), value) + } +} + +/// Filters for the CLIENT PAUSE command. +/// +/// +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClientPauseKind { + Write, + All, +} + +impl ClientPauseKind { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ClientPauseKind::Write => "WRITE", + ClientPauseKind::All => "ALL", + }) + } +} + +/// Arguments for the CLIENT REPLY command. 
+/// +/// +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClientReplyFlag { + On, + Off, + Skip, +} + +impl ClientReplyFlag { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ClientReplyFlag::On => "ON", + ClientReplyFlag::Off => "OFF", + ClientReplyFlag::Skip => "SKIP", + }) + } +} + +/// Arguments to the CLIENT UNBLOCK command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClientUnblockFlag { + Timeout, + Error, +} + +impl ClientUnblockFlag { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ClientUnblockFlag::Timeout => "TIMEOUT", + ClientUnblockFlag::Error => "ERROR", + }) + } +} diff --git a/src/types/cluster.rs b/src/types/cluster.rs new file mode 100644 index 00000000..335f5fac --- /dev/null +++ b/src/types/cluster.rs @@ -0,0 +1,91 @@ +use crate::utils; +use bytes_utils::Str; + +/// The state of the cluster from the CLUSTER INFO command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClusterState { + Ok, + Fail, +} + +impl Default for ClusterState { + fn default() -> Self { + ClusterState::Ok + } +} + +/// A parsed response from the CLUSTER INFO command. +/// +/// +#[derive(Clone, Debug, Eq, PartialEq, Default)] +pub struct ClusterInfo { + pub cluster_state: ClusterState, + pub cluster_slots_assigned: u16, + pub cluster_slots_ok: u16, + pub cluster_slots_pfail: u16, + pub cluster_slots_fail: u16, + pub cluster_known_nodes: u16, + pub cluster_size: u32, + pub cluster_current_epoch: u64, + pub cluster_my_epoch: u64, + pub cluster_stats_messages_sent: u64, + pub cluster_stats_messages_received: u64, +} + +/// Options for the CLUSTER FAILOVER command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClusterFailoverFlag { + Force, + Takeover, +} + +impl ClusterFailoverFlag { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ClusterFailoverFlag::Force => "FORCE", + ClusterFailoverFlag::Takeover => "TAKEOVER", + }) + } +} + +/// Flags for the CLUSTER RESET command. 
+/// +/// +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClusterResetFlag { + Hard, + Soft, +} + +impl ClusterResetFlag { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ClusterResetFlag::Hard => "HARD", + ClusterResetFlag::Soft => "SOFT", + }) + } +} + +/// Flags for the CLUSTER SETSLOT command. +/// +/// +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClusterSetSlotState { + Importing, + Migrating, + Stable, + Node(String), +} + +impl ClusterSetSlotState { + pub(crate) fn to_str(&self) -> (Str, Option) { + let (prefix, value) = match *self { + ClusterSetSlotState::Importing => ("IMPORTING", None), + ClusterSetSlotState::Migrating => ("MIGRATING", None), + ClusterSetSlotState::Stable => ("STABLE", None), + ClusterSetSlotState::Node(ref n) => ("NODE", Some(n.into())), + }; + + (utils::static_str(prefix), value) + } +} diff --git a/src/types/config.rs b/src/types/config.rs new file mode 100644 index 00000000..8ef99bb8 --- /dev/null +++ b/src/types/config.rs @@ -0,0 +1,522 @@ +use crate::types::RespVersion; +use crate::utils; +use std::cmp; + +#[cfg(feature = "enable-tls")] +#[cfg_attr(docsrs, doc(cfg(feature = "enable-tls")))] +pub use crate::protocol::tls::TlsConfig; + +/// The default amount of jitter when waiting to reconnect. +pub const DEFAULT_JITTER_MS: u32 = 100; + +/// The type of reconnection policy to use. This will apply to every connection used by the client. +/// +/// Use a `max_attempts` value of `0` to retry forever. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ReconnectPolicy { + /// Wait a constant amount of time between reconnect attempts, in ms. + Constant { + attempts: u32, + max_attempts: u32, + delay: u32, + jitter: u32, + }, + /// Backoff reconnection attempts linearly, adding `delay` each time. + Linear { + attempts: u32, + max_attempts: u32, + max_delay: u32, + delay: u32, + jitter: u32, + }, + /// Backoff reconnection attempts exponentially, multiplying the last delay by `mult` each time. 
+ Exponential { + attempts: u32, + max_attempts: u32, + min_delay: u32, + max_delay: u32, + mult: u32, + jitter: u32, + }, +} + +impl Default for ReconnectPolicy { + fn default() -> Self { + ReconnectPolicy::Constant { + attempts: 0, + max_attempts: 0, + delay: 1000, + jitter: DEFAULT_JITTER_MS, + } + } +} + +impl ReconnectPolicy { + /// Create a new reconnect policy with a constant backoff. + pub fn new_constant(max_attempts: u32, delay: u32) -> ReconnectPolicy { + ReconnectPolicy::Constant { + max_attempts, + delay, + attempts: 0, + jitter: DEFAULT_JITTER_MS, + } + } + + /// Create a new reconnect policy with a linear backoff. + pub fn new_linear(max_attempts: u32, max_delay: u32, delay: u32) -> ReconnectPolicy { + ReconnectPolicy::Linear { + max_attempts, + max_delay, + delay, + attempts: 0, + jitter: DEFAULT_JITTER_MS, + } + } + + /// Create a new reconnect policy with an exponential backoff. + pub fn new_exponential(max_attempts: u32, min_delay: u32, max_delay: u32, mult: u32) -> ReconnectPolicy { + ReconnectPolicy::Exponential { + max_delay, + max_attempts, + min_delay, + mult, + attempts: 0, + jitter: DEFAULT_JITTER_MS, + } + } + + /// Set the amount of jitter to add to each reconnect delay. + /// + /// Default: 100 ms + pub fn set_jitter(&mut self, jitter_ms: u32) { + match self { + ReconnectPolicy::Constant { ref mut jitter, .. } => { + *jitter = jitter_ms; + } + ReconnectPolicy::Linear { ref mut jitter, .. } => { + *jitter = jitter_ms; + } + ReconnectPolicy::Exponential { ref mut jitter, .. } => { + *jitter = jitter_ms; + } + } + } + + /// Reset the number of reconnection attempts. It's unlikely users will need to call this. + pub fn reset_attempts(&mut self) { + match *self { + ReconnectPolicy::Constant { ref mut attempts, .. } => { + *attempts = 0; + } + ReconnectPolicy::Linear { ref mut attempts, .. } => { + *attempts = 0; + } + ReconnectPolicy::Exponential { ref mut attempts, .. 
} => { + *attempts = 0; + } + } + } + + /// Read the number of reconnection attempts. + pub fn attempts(&self) -> u32 { + match *self { + ReconnectPolicy::Constant { ref attempts, .. } => *attempts, + ReconnectPolicy::Linear { ref attempts, .. } => *attempts, + ReconnectPolicy::Exponential { ref attempts, .. } => *attempts, + } + } + + /// Calculate the next delay, incrementing `attempts` in the process. + pub fn next_delay(&mut self) -> Option { + match *self { + ReconnectPolicy::Constant { + ref mut attempts, + delay, + max_attempts, + jitter, + } => { + *attempts = match utils::incr_with_max(*attempts, max_attempts) { + Some(a) => a, + None => return None, + }; + + Some(utils::add_jitter(delay as u64, jitter)) + } + ReconnectPolicy::Linear { + ref mut attempts, + max_delay, + max_attempts, + delay, + jitter, + } => { + *attempts = match utils::incr_with_max(*attempts, max_attempts) { + Some(a) => a, + None => return None, + }; + let delay = (delay as u64).saturating_mul(*attempts as u64); + + Some(cmp::min(max_delay as u64, utils::add_jitter(delay, jitter))) + } + ReconnectPolicy::Exponential { + ref mut attempts, + min_delay, + max_delay, + max_attempts, + mult, + jitter, + } => { + *attempts = match utils::incr_with_max(*attempts, max_attempts) { + Some(a) => a, + None => return None, + }; + let delay = (mult as u64) + .saturating_pow(*attempts - 1) + .saturating_mul(min_delay as u64); + + Some(cmp::min(max_delay as u64, utils::add_jitter(delay, jitter))) + } + } + } +} + +/// Describes how the client should respond when a command is sent while the client is in a blocked state from a blocking command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum Blocking { + /// Wait to send the command until the blocked command finishes. (Default) + Block, + /// Return an error to the caller. + Error, + /// Interrupt the blocked command by automatically sending `CLIENT UNBLOCK` for the blocked connection. 
 + Interrupt, +} + +impl Default for Blocking { + fn default() -> Self { + Blocking::Block + } +} + +/// Configuration options for backpressure features in the client. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct BackpressureConfig { + /// Whether or not to disable the automatic backpressure features when pipelining is enabled. + /// + /// If `true` then `RedisErrorKind::Backpressure` errors may be surfaced to callers. + /// + /// Default: `false` + pub disable_auto_backpressure: bool, + /// Disable the backpressure scaling logic used to calculate the `sleep` duration when throttling commands. + /// + /// If `true` then the client will always wait a constant amount of time defined by `min_sleep_duration_ms` when throttling commands. + /// + /// Default: `false` + pub disable_backpressure_scaling: bool, + /// The minimum amount of time to wait when applying backpressure to a command. + /// + /// If `0` then no backpressure will be applied, but backpressure errors will not be surfaced to callers unless `disable_auto_backpressure` is `true`. + /// + /// Default: 100 ms + pub min_sleep_duration_ms: u64, + /// The maximum number of in-flight commands (per connection) before backpressure will be applied. + /// + /// Default: 5000 + pub max_in_flight_commands: u64, +} + +impl Default for BackpressureConfig { + fn default() -> Self { + BackpressureConfig { + disable_auto_backpressure: false, + disable_backpressure_scaling: false, + min_sleep_duration_ms: 100, + max_in_flight_commands: 5000, + } + } +} + +/// Configuration options that can affect the performance of the client. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct PerformanceConfig { + /// Whether or not the client should automatically pipeline commands when possible. + /// + /// Default: `true` + pub pipeline: bool, + /// The maximum number of times the client will attempt to send a command. + /// + /// This value will be incremented on a command whenever the connection closes while the command is in-flight.
 + /// + /// Default: `3` + pub max_command_attempts: u32, + /// Configuration options for backpressure features in the client. + pub backpressure: BackpressureConfig, + /// An optional timeout (in milliseconds) to apply to all commands. + /// + /// If `0` this will disable any timeout being applied to commands. + /// + /// Default: `0` + pub default_command_timeout_ms: u64, + /// The maximum number of frames that will be passed to a socket before flushing the socket. + /// + /// Note: in some circumstances the client will always flush the socket (`QUIT`, `EXEC`, etc). + /// + /// Default: 1000 + pub max_feed_count: u64, + /// The amount of time, in milliseconds, to wait after a `MOVED` or `ASK` error is received before the client will update the cached cluster state and try again. + /// + /// If `0` the client will follow `MOVED` or `ASK` redirects as quickly as possible. However, this can result in some unnecessary state synchronization commands when large values are being moved between nodes. + /// + /// Default: 50 ms + pub cluster_cache_update_delay_ms: u64, +} + +impl Default for PerformanceConfig { + fn default() -> Self { + PerformanceConfig { + pipeline: true, + backpressure: BackpressureConfig::default(), + max_command_attempts: 3, + default_command_timeout_ms: 0, + max_feed_count: 1000, + cluster_cache_update_delay_ms: 50, + } + } +} + +/// Configuration options for a `RedisClient`. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct RedisConfig { + /// Whether or not the client should return an error if it cannot connect to the server the first time when being initialized. + /// If `false` the client will run the reconnect logic if it cannot connect to the server the first time, but if `true` the client + /// will return initial connection errors to the caller immediately.
+ /// + /// Normally the reconnection logic only applies to connections that close unexpectedly, but this flag can apply the same logic to + /// the first connection as it is being created. + /// + /// Note: Callers should use caution setting this to `false` since it can make debugging configuration issues more difficult. + /// + /// Default: `true` + pub fail_fast: bool, + /// The default behavior of the client when a command is sent while the connection is blocked on a blocking command. + /// + /// Default: `Blocking::Block` + pub blocking: Blocking, + /// An optional ACL username for the client to use when authenticating. If ACL rules are not configured this should be `None`. + /// + /// Default: `None` + pub username: Option, + /// An optional password for the client to use when authenticating. + /// + /// Default: `None` + pub password: Option, + /// Connection configuration for the server(s). + /// + /// Default: `Centralized(localhost, 6379)` + pub server: ServerConfig, + /// The protocol version to use when communicating with the server(s). + /// + /// If RESP3 is specified the client will automatically use `HELLO` when authenticating. **This requires Redis >=6.0.0.** If the `HELLO` + /// command fails this will prevent the client from connecting. Callers should set this to RESP2 and use `HELLO` manually to fall back + /// to RESP2 if needed. + /// + /// Note: upgrading an existing codebase from RESP2 to RESP3 may require changing certain type signatures. RESP3 has a slightly different type system than RESP2. + /// + /// Default: `RESP2` + pub version: RespVersion, + /// Configuration options that can affect the performance of the client. + pub performance: PerformanceConfig, + /// An optional database number that the client will automatically `SELECT` after connecting or reconnecting. + /// + /// It is recommended that callers use this field instead of putting a `select()` call inside the `on_reconnect` block, if possible. 
Commands that were in-flight when the connection closed will retry before anything inside the `on_reconnect` block. + /// + /// Default: `None` + pub database: Option, + /// TLS configuration fields. If `None` the connection will not use TLS. + /// + /// Default: `None` + #[cfg(feature = "enable-tls")] + #[cfg_attr(docsrs, doc(cfg(feature = "enable-tls")))] + pub tls: Option, + /// Whether or not to enable tracing for this client. + /// + /// Default: `false` + #[cfg(feature = "partial-tracing")] + #[cfg_attr(docsrs, doc(cfg(feature = "partial-tracing")))] + pub tracing: bool, +} + +impl Default for RedisConfig { + fn default() -> Self { + RedisConfig { + fail_fast: true, + blocking: Blocking::default(), + username: None, + password: None, + server: ServerConfig::default(), + version: RespVersion::RESP2, + performance: PerformanceConfig::default(), + database: None, + #[cfg(feature = "enable-tls")] + #[cfg_attr(docsrs, doc(cfg(feature = "enable-tls")))] + tls: None, + #[cfg(feature = "partial-tracing")] + #[cfg_attr(docsrs, doc(cfg(feature = "partial-tracing")))] + tracing: false, + } + } +} + +impl RedisConfig { + /// Whether or not the client uses TLS. + #[cfg(feature = "enable-tls")] + pub fn uses_tls(&self) -> bool { + self.tls.is_some() + } + + /// Whether or not the client uses TLS. + #[cfg(not(feature = "enable-tls"))] + pub fn uses_tls(&self) -> bool { + false + } +} + +/// Connection configuration for the Redis server. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ServerConfig { + Centralized { + /// The hostname or IP address of the Redis server. + host: String, + /// The port on which the Redis server is listening. + port: u16, + }, + Clustered { + /// An array of `(host, port)` tuples for nodes in the cluster. Only one node in the cluster needs to be provided here, + /// the rest will be discovered via the `CLUSTER NODES` command. + hosts: Vec<(String, u16)>, + }, + Sentinel { + /// An array of `(host, port)` tuples for each known sentinel instance. 
+ hosts: Vec<(String, u16)>, + /// The service name for primary/main instances. + service_name: String, + + /// An optional ACL username for the client to use when authenticating. + #[cfg(feature = "sentinel-auth")] + #[cfg_attr(docsrs, doc(cfg(feature = "sentinel-auth")))] + username: Option, + /// An optional password for the client to use when authenticating. + #[cfg(feature = "sentinel-auth")] + #[cfg_attr(docsrs, doc(cfg(feature = "sentinel-auth")))] + password: Option, + }, +} + +impl Default for ServerConfig { + fn default() -> Self { + ServerConfig::default_centralized() + } +} + +impl ServerConfig { + /// Create a new centralized config with the provided host and port. + pub fn new_centralized(host: S, port: u16) -> ServerConfig + where + S: Into, + { + ServerConfig::Centralized { + host: host.into(), + port, + } + } + + /// Create a new clustered config with the provided set of hosts and ports. + /// + /// Only one valid host in the cluster needs to be provided here. The client will use `CLUSTER NODES` to discover the other nodes. + pub fn new_clustered(mut hosts: Vec<(S, u16)>) -> ServerConfig + where + S: Into, + { + ServerConfig::Clustered { + hosts: hosts.drain(..).map(|(s, p)| (s.into(), p)).collect(), + } + } + + /// Create a new sentinel config with the provided set of hosts and the name of the service. + /// + /// This library will connect using the details from the [Redis documentation](https://redis.io/topics/sentinel-clients). + pub fn new_sentinel(mut hosts: Vec<(H, u16)>, service_name: N) -> ServerConfig + where + H: Into, + N: Into, + { + ServerConfig::Sentinel { + hosts: hosts.drain(..).map(|(h, p)| (h.into(), p)).collect(), + service_name: service_name.into(), + #[cfg(feature = "sentinel-auth")] + username: None, + #[cfg(feature = "sentinel-auth")] + password: None, + } + } + + /// Create a centralized config with default settings for a local deployment. 
+ pub fn default_centralized() -> ServerConfig { + ServerConfig::Centralized { + host: "127.0.0.1".to_owned(), + port: 6379, + } + } + + /// Create a clustered config with the same defaults as specified in the `create-cluster` script provided by Redis. + pub fn default_clustered() -> ServerConfig { + ServerConfig::Clustered { + hosts: vec![ + ("127.0.0.1".to_owned(), 30001), + ("127.0.0.1".to_owned(), 30002), + ("127.0.0.1".to_owned(), 30003), + ], + } + } + + /// Check if the config is for a clustered Redis deployment. + pub fn is_clustered(&self) -> bool { + match self { + ServerConfig::Clustered { .. } => true, + _ => false, + } + } + + /// Check if the config is for a sentinel deployment. + pub fn is_sentinel(&self) -> bool { + match self { + ServerConfig::Sentinel { .. } => true, + _ => false, + } + } + + /// Read the server hosts or sentinel hosts if using the sentinel interface. + pub fn hosts(&self) -> Vec<(&str, u16)> { + match *self { + ServerConfig::Centralized { ref host, port } => vec![(host.as_str(), port)], + ServerConfig::Clustered { ref hosts } => hosts.iter().map(|(h, p)| (h.as_str(), *p)).collect(), + ServerConfig::Sentinel { ref hosts, .. 
} => hosts.iter().map(|(h, p)| (h.as_str(), *p)).collect(), + } + } +} + +#[cfg(test)] +mod tests { + use super::ReconnectPolicy; + + #[test] + fn should_get_next_delay_repeatedly() { + let mut policy = ReconnectPolicy::new_exponential(0, 100, 999999999, 2); + let mut last_delay = 1; + for _ in 0..9_999_999 { + let delay = policy.next_delay().unwrap(); + if delay < last_delay { + panic!("Invalid next delay: {:?}", delay); + } + last_delay = delay; + } + } +} diff --git a/src/types/geo.rs b/src/types/geo.rs new file mode 100644 index 00000000..1f6749f6 --- /dev/null +++ b/src/types/geo.rs @@ -0,0 +1,149 @@ +use crate::error::RedisError; +use crate::types::RedisValue; +use crate::utils; +use bytes_utils::Str; +use std::collections::VecDeque; +use std::convert::{TryFrom, TryInto}; + +/// A struct describing the longitude and latitude coordinates of a GEO command. +#[derive(Clone, Debug)] +pub struct GeoPosition { + pub longitude: f64, + pub latitude: f64, +} + +impl PartialEq for GeoPosition { + fn eq(&self, other: &Self) -> bool { + utils::f64_eq(self.longitude, other.longitude) && utils::f64_eq(self.latitude, other.latitude) + } +} + +impl Eq for GeoPosition {} + +impl From<(f64, f64)> for GeoPosition { + fn from(d: (f64, f64)) -> Self { + GeoPosition { + longitude: d.0, + latitude: d.1, + } + } +} + +/// Units for the GEO DIST command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum GeoUnit { + Meters, + Kilometers, + Miles, + Feet, +} + +impl GeoUnit { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + GeoUnit::Meters => "m", + GeoUnit::Kilometers => "km", + GeoUnit::Feet => "ft", + GeoUnit::Miles => "mi", + }) + } +} + +/// A struct describing the value inside a GEO data structure. 
+#[derive(Debug, Clone, Eq, PartialEq)] +pub struct GeoValue { + pub coordinates: GeoPosition, + pub member: RedisValue, +} + +impl GeoValue { + pub fn new>(coordinates: GeoPosition, member: V) -> Self { + let member = member.into(); + GeoValue { coordinates, member } + } +} + +impl TryFrom<(f64, f64, T)> for GeoValue +where + T: TryInto, + T::Error: Into, +{ + type Error = RedisError; + + fn try_from(v: (f64, f64, T)) -> Result { + Ok(GeoValue { + coordinates: GeoPosition { + longitude: v.0, + latitude: v.1, + }, + member: utils::try_into(v.2)?, + }) + } +} + +/// A convenience struct for commands that take one or more GEO values. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct MultipleGeoValues { + inner: Vec, +} + +impl MultipleGeoValues { + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn inner(self) -> Vec { + self.inner + } +} + +impl From for MultipleGeoValues { + fn from(d: GeoValue) -> Self { + MultipleGeoValues { inner: vec![d] } + } +} + +impl From> for MultipleGeoValues { + fn from(d: Vec) -> Self { + MultipleGeoValues { inner: d } + } +} + +impl From> for MultipleGeoValues { + fn from(d: VecDeque) -> Self { + MultipleGeoValues { + inner: d.into_iter().collect(), + } + } +} + +/// A typed struct representing the full output of the GEORADIUS (or similar) command. 
+#[derive(Clone, Debug)] +pub struct GeoRadiusInfo { + pub member: RedisValue, + pub position: Option, + pub distance: Option, + pub hash: Option, +} + +impl Default for GeoRadiusInfo { + fn default() -> Self { + GeoRadiusInfo { + member: RedisValue::Null, + position: None, + distance: None, + hash: None, + } + } +} + +impl PartialEq for GeoRadiusInfo { + fn eq(&self, other: &Self) -> bool { + self.member == other.member + && self.position == other.position + && self.hash == other.hash + && utils::f64_opt_eq(&self.distance, &other.distance) + } +} + +impl Eq for GeoRadiusInfo {} diff --git a/src/types/lists.rs b/src/types/lists.rs new file mode 100644 index 00000000..b444d762 --- /dev/null +++ b/src/types/lists.rs @@ -0,0 +1,36 @@ +use crate::utils; +use bytes_utils::Str; + +/// The direction to move elements in a *LMOVE command. +/// +/// +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum LMoveDirection { + Left, + Right, +} + +impl LMoveDirection { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + LMoveDirection::Left => "LEFT", + LMoveDirection::Right => "RIGHT", + }) + } +} + +/// Location flag for the `LINSERT` command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ListLocation { + Before, + After, +} + +impl ListLocation { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ListLocation::Before => "BEFORE", + ListLocation::After => "AFTER", + }) + } +} diff --git a/src/types/misc.rs b/src/types/misc.rs new file mode 100644 index 00000000..0b3b6a86 --- /dev/null +++ b/src/types/misc.rs @@ -0,0 +1,396 @@ +use crate::utils; +use bytes_utils::Str; +use std::collections::HashMap; +use std::fmt; + +/// Arguments passed to the SHUTDOWN command. 
+/// +/// +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ShutdownFlags { + Save, + NoSave, +} + +impl ShutdownFlags { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ShutdownFlags::Save => "SAVE", + ShutdownFlags::NoSave => "NOSAVE", + }) + } +} + +/// An event on the publish-subscribe interface describing a keyspace notification. +/// +/// +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct KeyspaceEvent { + pub db: u8, + pub operation: String, + pub key: String, +} + +/// Aggregate options for the [zinterstore](https://redis.io/commands/zinterstore) (and related) commands. +pub enum AggregateOptions { + Sum, + Min, + Max, +} + +impl AggregateOptions { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + AggregateOptions::Sum => "SUM", + AggregateOptions::Min => "MIN", + AggregateOptions::Max => "MAX", + }) + } +} + +/// Options for the [info](https://redis.io/commands/info) command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum InfoKind { + Default, + All, + Keyspace, + Cluster, + CommandStats, + Cpu, + Replication, + Stats, + Persistence, + Memory, + Clients, + Server, +} + +impl InfoKind { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + InfoKind::Default => "default", + InfoKind::All => "all", + InfoKind::Keyspace => "keyspace", + InfoKind::Cluster => "cluster", + InfoKind::CommandStats => "commandstats", + InfoKind::Cpu => "cpu", + InfoKind::Replication => "replication", + InfoKind::Stats => "stats", + InfoKind::Persistence => "persistence", + InfoKind::Memory => "memory", + InfoKind::Clients => "clients", + InfoKind::Server => "server", + }) + } +} + +/// Configuration for custom redis commands, primarily used for interacting with third party modules or extensions. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct CustomCommand { + /// The command name, sent directly to the server. 
+  pub cmd: Str,
+  /// The hash slot to use for the provided command when running against a cluster. If a hash slot is not provided the command will run against a random node in the cluster.
+  pub hash_slot: Option<u16>,
+  /// Whether or not the command should block the connection while waiting on a response.
+  pub is_blocking: bool,
+}
+
+impl CustomCommand {
+  /// Create a new custom command.
+  ///
+  /// See the [custom](crate::interfaces::ClientLike::custom) command for more information.
+  pub fn new<C>(cmd: C, hash_slot: Option<u16>, is_blocking: bool) -> Self
+  where
+    C: Into<Str>,
+  {
+    CustomCommand {
+      cmd: cmd.into(),
+      hash_slot,
+      is_blocking,
+    }
+  }
+
+  /// Create a new custom command specified by a `&'static str`.
+  pub fn new_static(cmd: &'static str, hash_slot: Option<u16>, is_blocking: bool) -> Self {
+    CustomCommand {
+      cmd: utils::static_str(cmd),
+      hash_slot,
+      is_blocking,
+    }
+  }
+}
+
+/// An enum describing the possible ways in which a Redis cluster can change state.
+///
+/// See [on_cluster_change](crate::clients::RedisClient::on_cluster_change) for more information.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum ClusterStateChange {
+  /// A node was added to the cluster.
+  ///
+  /// This implies that hash slots were also probably rebalanced.
+  Add((String, u16)),
+  /// A node was removed from the cluster.
+  ///
+  /// This implies that hash slots were also probably rebalanced.
+  Remove((String, u16)),
+  /// Hash slots were rebalanced across the cluster.
+  Rebalance,
+}
+
+/// Options for the [set](https://redis.io/commands/set) command.
+///
+/// <https://redis.io/commands/set>
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub enum SetOptions {
+  NX,
+  XX,
+}
+
+impl SetOptions {
+  pub(crate) fn to_str(&self) -> Str {
+    utils::static_str(match *self {
+      SetOptions::NX => "NX",
+      SetOptions::XX => "XX",
+    })
+  }
+}
+
+/// Expiration options for the [set](https://redis.io/commands/set) command.
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub enum Expiration {
+  /// Expiration in seconds.
+ EX(i64), + /// Expiration in milliseconds. + PX(i64), + /// Expiration time, in seconds. + EXAT(i64), + /// Expiration time, in milliseconds. + PXAT(i64), + /// Do not reset the TTL. + KEEPTTL, +} + +impl Expiration { + pub(crate) fn into_args(self) -> (Str, Option) { + let (prefix, value) = match self { + Expiration::EX(i) => ("EX", Some(i)), + Expiration::PX(i) => ("PX", Some(i)), + Expiration::EXAT(i) => ("EXAT", Some(i)), + Expiration::PXAT(i) => ("PXAT", Some(i)), + Expiration::KEEPTTL => ("KEEPTTL", None), + }; + + (utils::static_str(prefix), value) + } +} + +/// The state of the underlying connection to the Redis server. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClientState { + Disconnected, + Disconnecting, + Connected, + Connecting, +} + +impl ClientState { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ClientState::Connecting => "Connecting", + ClientState::Connected => "Connected", + ClientState::Disconnecting => "Disconnecting", + ClientState::Disconnected => "Disconnected", + }) + } +} + +impl fmt::Display for ClientState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.to_str()) + } +} + +/// The parsed result of the MEMORY STATS command for a specific database. +/// +/// +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct DatabaseMemoryStats { + pub overhead_hashtable_main: u64, + pub overhead_hashtable_expires: u64, +} + +impl Default for DatabaseMemoryStats { + fn default() -> Self { + DatabaseMemoryStats { + overhead_hashtable_expires: 0, + overhead_hashtable_main: 0, + } + } +} + +/// The parsed result of the MEMORY STATS command. 
+/// +/// +#[derive(Clone, Debug)] +pub struct MemoryStats { + pub peak_allocated: u64, + pub total_allocated: u64, + pub startup_allocated: u64, + pub replication_backlog: u64, + pub clients_slaves: u64, + pub clients_normal: u64, + pub aof_buffer: u64, + pub lua_caches: u64, + pub overhead_total: u64, + pub keys_count: u64, + pub keys_bytes_per_key: u64, + pub dataset_bytes: u64, + pub dataset_percentage: f64, + pub peak_percentage: f64, + pub fragmentation: f64, + pub fragmentation_bytes: u64, + pub rss_overhead_ratio: f64, + pub rss_overhead_bytes: u64, + pub allocator_allocated: u64, + pub allocator_active: u64, + pub allocator_resident: u64, + pub allocator_fragmentation_ratio: f64, + pub allocator_fragmentation_bytes: u64, + pub allocator_rss_ratio: f64, + pub allocator_rss_bytes: u64, + pub db: HashMap, +} + +impl Default for MemoryStats { + fn default() -> Self { + MemoryStats { + peak_allocated: 0, + total_allocated: 0, + startup_allocated: 0, + replication_backlog: 0, + clients_normal: 0, + clients_slaves: 0, + aof_buffer: 0, + lua_caches: 0, + overhead_total: 0, + keys_count: 0, + keys_bytes_per_key: 0, + dataset_bytes: 0, + dataset_percentage: 0.0, + peak_percentage: 0.0, + fragmentation: 0.0, + fragmentation_bytes: 0, + rss_overhead_ratio: 0.0, + rss_overhead_bytes: 0, + allocator_allocated: 0, + allocator_active: 0, + allocator_resident: 0, + allocator_fragmentation_ratio: 0.0, + allocator_fragmentation_bytes: 0, + allocator_rss_bytes: 0, + allocator_rss_ratio: 0.0, + db: HashMap::new(), + } + } +} + +impl PartialEq for MemoryStats { + fn eq(&self, other: &Self) -> bool { + self.peak_allocated == other.peak_allocated + && self.total_allocated == other.total_allocated + && self.startup_allocated == other.startup_allocated + && self.replication_backlog == other.replication_backlog + && self.clients_normal == other.clients_normal + && self.clients_slaves == other.clients_slaves + && self.aof_buffer == other.aof_buffer + && self.lua_caches == 
other.lua_caches + && self.overhead_total == other.overhead_total + && self.keys_count == other.keys_count + && self.keys_bytes_per_key == other.keys_bytes_per_key + && self.dataset_bytes == other.dataset_bytes + && utils::f64_eq(self.dataset_percentage, other.dataset_percentage) + && utils::f64_eq(self.peak_percentage, other.peak_percentage) + && utils::f64_eq(self.fragmentation, other.fragmentation) + && self.fragmentation_bytes == other.fragmentation_bytes + && utils::f64_eq(self.rss_overhead_ratio, other.rss_overhead_ratio) + && self.rss_overhead_bytes == other.rss_overhead_bytes + && self.allocator_allocated == other.allocator_allocated + && self.allocator_active == other.allocator_active + && self.allocator_resident == other.allocator_resident + && utils::f64_eq(self.allocator_fragmentation_ratio, other.allocator_fragmentation_ratio) + && self.allocator_fragmentation_bytes == other.allocator_fragmentation_bytes + && self.allocator_rss_bytes == other.allocator_rss_bytes + && utils::f64_eq(self.allocator_rss_ratio, other.allocator_rss_ratio) + && self.db == other.db + } +} + +impl Eq for MemoryStats {} + +/// The output of an entry in the slow queries log. +/// +/// +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct SlowlogEntry { + pub id: i64, + pub timestamp: i64, + pub duration: u64, + pub args: Vec, + pub ip: Option, + pub name: Option, +} + +/// Flags for the SCRIPT DEBUG command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ScriptDebugFlag { + Yes, + No, + Sync, +} + +impl ScriptDebugFlag { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ScriptDebugFlag::Yes => "YES", + ScriptDebugFlag::No => "NO", + ScriptDebugFlag::Sync => "SYNC", + }) + } +} + +/// Arguments for the `SENTINEL SIMULATE-FAILURE` command. 
+#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg(feature = "sentinel-client")] +#[cfg_attr(docsrs, doc(cfg(feature = "sentinel-client")))] +pub enum SentinelFailureKind { + CrashAfterElection, + CrashAfterPromotion, + Help, +} + +#[cfg(feature = "sentinel-client")] +impl SentinelFailureKind { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match self { + SentinelFailureKind::CrashAfterElection => "crash-after-election", + SentinelFailureKind::CrashAfterPromotion => "crash-after-promotion", + SentinelFailureKind::Help => "help", + }) + } +} + +/// The sort order for redis commands that take or return a sorted list. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum SortOrder { + Asc, + Desc, +} + +impl SortOrder { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + SortOrder::Asc => "ASC", + SortOrder::Desc => "DESC", + }) + } +} diff --git a/src/types/mod.rs b/src/types/mod.rs new file mode 100644 index 00000000..1af9b240 --- /dev/null +++ b/src/types/mod.rs @@ -0,0 +1,59 @@ +use crate::error::RedisError; +use std::net::SocketAddr; +use tokio::task::JoinHandle; + +mod acl; +mod args; +mod client; +mod cluster; +mod config; +mod geo; +mod lists; +mod misc; +mod multiple; +mod scan; +mod sorted_sets; +mod streams; + +pub use acl::*; +pub use args::*; +pub use client::*; +pub use cluster::*; +pub use config::*; +pub use geo::*; +pub use lists::*; +pub use misc::*; +pub use multiple::*; +pub use scan::*; +pub use sorted_sets::*; +pub use streams::*; + +#[cfg(feature = "metrics")] +#[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] +pub use crate::modules::metrics::Stats; + +pub(crate) static QUEUED: &'static str = "QUEUED"; +pub(crate) static NIL: &'static str = "nil"; + +pub use crate::modules::response::{FromRedis, FromRedisKey}; +pub use crate::protocol::types::ClusterKeyCache; +pub use redis_protocol::resp3::types::{Frame, RespVersion}; + +/// The ANY flag used on certain GEO commands. 
+pub type Any = bool;
+/// The result from any of the `connect` functions showing the error that closed the connection, if any.
+pub type ConnectHandle = JoinHandle<Result<(), RedisError>>;
+/// A tuple of `(offset, count)` values for commands that allow paging through results.
+pub type Limit = (i64, i64);
+/// An argument type equivalent to "[LIMIT count]".
+pub type LimitCount = Option<i64>;
+
+/// A trait that can be used to override DNS resolution logic for a client.
+///
+/// Note: using this requires [async-trait](https://crates.io/crates/async-trait).
+// TODO expose this to callers so they can do their own DNS resolution
+#[async_trait]
+pub(crate) trait Resolve: Send + Sync + 'static {
+  /// Resolve a hostname.
+  async fn resolve(&self, host: String, port: u16) -> Result<SocketAddr, RedisError>;
+}
diff --git a/src/types/multiple.rs b/src/types/multiple.rs
new file mode 100644
index 00000000..8aed580f
--- /dev/null
+++ b/src/types/multiple.rs
@@ -0,0 +1,232 @@
+use crate::error::RedisError;
+use crate::types::{RedisKey, RedisValue};
+use std::collections::VecDeque;
+use std::convert::{TryFrom, TryInto};
+use std::iter::FromIterator;
+
+/// Convenience struct for commands that take 1 or more keys.
+///
+/// **Note: this can also be used to represent an empty array of keys by passing `None` to any function that takes `Into<MultipleKeys>`.** This
+/// is mostly useful for `EVAL` and `EVALSHA`.
+#[derive(Clone, Debug, Eq, PartialEq)] +pub struct MultipleKeys { + keys: Vec, +} + +impl MultipleKeys { + pub fn new() -> MultipleKeys { + MultipleKeys { keys: Vec::new() } + } + + pub fn inner(self) -> Vec { + self.keys + } + + pub fn len(&self) -> usize { + self.keys.len() + } +} + +impl From> for MultipleKeys { + fn from(key: Option) -> Self { + let keys = if let Some(key) = key { vec![key] } else { vec![] }; + MultipleKeys { keys } + } +} + +impl From for MultipleKeys +where + T: Into, +{ + fn from(d: T) -> Self { + MultipleKeys { keys: vec![d.into()] } + } +} + +impl FromIterator for MultipleKeys +where + T: Into, +{ + fn from_iter>(iter: I) -> Self { + MultipleKeys { + keys: iter.into_iter().map(|k| k.into()).collect(), + } + } +} + +impl From> for MultipleKeys +where + T: Into, +{ + fn from(d: Vec) -> Self { + MultipleKeys { + keys: d.into_iter().map(|k| k.into()).collect(), + } + } +} + +impl From> for MultipleKeys +where + T: Into, +{ + fn from(d: VecDeque) -> Self { + MultipleKeys { + keys: d.into_iter().map(|k| k.into()).collect(), + } + } +} + +/// Convenience struct for commands that take 1 or more strings. +pub type MultipleStrings = MultipleKeys; + +/// Convenience struct for commands that take 1 or more values. +/// +/// **Note: this can be used to represent an empty set of values by using `None` for any function that takes `Into`.** This +/// is most useful for `EVAL` and `EVALSHA`. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct MultipleValues { + values: Vec, +} + +impl MultipleValues { + pub fn inner(self) -> Vec { + self.values + } + + pub fn len(&self) -> usize { + self.values.len() + } + + /// Convert this a nested `RedisValue`. 
+ pub fn into_values(self) -> RedisValue { + RedisValue::Array(self.values) + } +} + +impl From> for MultipleValues { + fn from(val: Option) -> Self { + let values = if let Some(val) = val { vec![val] } else { vec![] }; + MultipleValues { values } + } +} + +/* +// https://github.com/rust-lang/rust/issues/50133 +// FIXME there has to be a way around this issue? +impl TryFrom for MultipleValues +where + T: TryInto, + T::Error: Into, +{ + type Error = RedisError; + + fn try_from(d: T) -> Result { + Ok(MultipleValues { values: vec![to!(d)?] }) + } +} +*/ + +// TODO consider supporting conversion from tuples with a reasonable size + +impl From for MultipleValues +where + T: Into, +{ + fn from(d: T) -> Self { + MultipleValues { values: vec![d.into()] } + } +} + +impl FromIterator for MultipleValues +where + T: Into, +{ + fn from_iter>(iter: I) -> Self { + MultipleValues { + values: iter.into_iter().map(|v| v.into()).collect(), + } + } +} + +impl TryFrom> for MultipleValues +where + T: TryInto, + T::Error: Into, +{ + type Error = RedisError; + + fn try_from(d: Vec) -> Result { + let mut values = Vec::with_capacity(d.len()); + for value in d.into_iter() { + values.push(to!(value)?); + } + + Ok(MultipleValues { values }) + } +} + +impl TryFrom> for MultipleValues +where + T: TryInto, + T::Error: Into, +{ + type Error = RedisError; + + fn try_from(d: VecDeque) -> Result { + let mut values = Vec::with_capacity(d.len()); + for value in d.into_iter() { + values.push(to!(value)?); + } + + Ok(MultipleValues { values }) + } +} + +/// A convenience struct for functions that take one or more hash slot values. 
+pub struct MultipleHashSlots {
+  inner: Vec<u16>,
+}
+
+impl MultipleHashSlots {
+  pub fn inner(self) -> Vec<u16> {
+    self.inner
+  }
+
+  pub fn len(&self) -> usize {
+    self.inner.len()
+  }
+}
+
+impl From<u16> for MultipleHashSlots {
+  fn from(d: u16) -> Self {
+    MultipleHashSlots { inner: vec![d] }
+  }
+}
+
+impl From<Vec<u16>> for MultipleHashSlots {
+  fn from(d: Vec<u16>) -> Self {
+    MultipleHashSlots { inner: d }
+  }
+}
+
+impl<'a> From<&'a [u16]> for MultipleHashSlots {
+  fn from(d: &'a [u16]) -> Self {
+    MultipleHashSlots { inner: d.to_vec() }
+  }
+}
+
+impl From<VecDeque<u16>> for MultipleHashSlots {
+  fn from(d: VecDeque<u16>) -> Self {
+    MultipleHashSlots {
+      inner: d.into_iter().collect(),
+    }
+  }
+}
+
+impl FromIterator<u16> for MultipleHashSlots {
+  fn from_iter<I: IntoIterator<Item = u16>>(iter: I) -> Self {
+    MultipleHashSlots {
+      inner: iter.into_iter().collect(),
+    }
+  }
+}
diff --git a/src/types/scan.rs b/src/types/scan.rs
new file mode 100644
index 00000000..13005f77
--- /dev/null
+++ b/src/types/scan.rs
@@ -0,0 +1,276 @@
+use crate::clients::RedisClient;
+use crate::error::RedisError;
+use crate::modules::inner::RedisClientInner;
+use crate::protocol::types::{KeyScanInner, RedisCommand, RedisCommandKind, ValueScanInner};
+use crate::types::{RedisKey, RedisMap, RedisValue};
+use crate::utils;
+use bytes_utils::Str;
+use std::sync::Arc;
+
+/// The types of values supported by the [type](https://redis.io/commands/type) command.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum ScanType {
+  Set,
+  String,
+  ZSet,
+  List,
+  Hash,
+  Stream,
+}
+
+impl ScanType {
+  pub(crate) fn to_str(&self) -> Str {
+    utils::static_str(match *self {
+      ScanType::Set => "set",
+      ScanType::String => "string",
+      ScanType::List => "list",
+      ScanType::ZSet => "zset",
+      ScanType::Hash => "hash",
+      ScanType::Stream => "stream",
+    })
+  }
+}
+
+/// The result of a SCAN operation.
+pub struct ScanResult {
+  pub(crate) results: Option<Vec<RedisKey>>,
+  pub(crate) inner: Arc<RedisClientInner>,
+  pub(crate) args: Vec<RedisValue>,
+  pub(crate) scan_state: KeyScanInner,
+  pub(crate) can_continue: bool,
+}
+
+impl ScanResult {
+  /// Read the current cursor from the SCAN operation.
+  pub fn cursor(&self) -> &str {
+    &self.scan_state.cursor
+  }
+
+  /// Whether or not the scan call will continue returning results. If `false` this will be the last result set returned on the stream.
+  ///
+  /// Calling `next` when this returns `false` will return `Ok(())`, so this does not need to be checked on each result.
+  pub fn has_more(&self) -> bool {
+    self.can_continue
+  }
+
+  /// A reference to the results of the SCAN operation.
+  pub fn results(&self) -> &Option<Vec<RedisKey>> {
+    &self.results
+  }
+
+  /// Take ownership over the results of the SCAN operation. Calls to `results` or `take_results` will return `None` afterwards.
+  pub fn take_results(&mut self) -> Option<Vec<RedisKey>> {
+    self.results.take()
+  }
+
+  /// Move on to the next page of results from the SCAN operation. If no more results are available this may close the stream.
+  ///
+  /// **This must be called to continue scanning the keyspace.** Results are not automatically scanned in the background since
+  /// this could cause the buffer backing the stream to grow too large very quickly. This interface provides a mechanism
+  /// for throttling the throughput of the SCAN call. If this struct is dropped without calling this function the stream will
+  /// close without an error.
+  ///
+  /// If this function returns an error the scan call cannot continue as the client has been closed, or some other fatal error
+  /// has occurred. If this happens the error will appear in the stream from the original SCAN call.
+ pub fn next(self) -> Result<(), RedisError> { + if !self.can_continue { + return Ok(()); + } + + let kind = RedisCommandKind::Scan(self.scan_state); + let cmd = RedisCommand::new(kind, self.args, None); + utils::send_command(&self.inner, cmd) + } + + /// A lightweight function to create a Redis client from the SCAN result. + /// + /// To continue scanning the caller should call `next` on this struct. Calling `scan` again on the client will initiate a new SCAN call starting with a cursor of 0. + pub fn create_client(&self) -> RedisClient { + RedisClient { + inner: self.inner.clone(), + } + } +} + +/// The result of a HSCAN operation. +pub struct HScanResult { + pub(crate) results: Option, + pub(crate) inner: Arc, + pub(crate) args: Vec, + pub(crate) scan_state: ValueScanInner, + pub(crate) can_continue: bool, +} + +impl HScanResult { + /// Read the current cursor from the SCAN operation. + pub fn cursor(&self) -> &str { + &self.scan_state.cursor + } + + /// Whether or not the scan call will continue returning results. If `false` this will be the last result set returned on the stream. + /// + /// Calling `next` when this returns `false` will return `Ok(())`, so this does not need to be checked on each result. + pub fn has_more(&self) -> bool { + self.can_continue + } + + /// A reference to the results of the HSCAN operation. + pub fn results(&self) -> &Option { + &self.results + } + + /// Take ownership over the results of the HSCAN operation. Calls to `results` or `take_results` will return `None` afterwards. + pub fn take_results(&mut self) -> Option { + self.results.take() + } + + /// Move on to the next page of results from the HSCAN operation. If no more results are available this may close the stream. + /// + /// **This must be called to continue scanning the keyspace.** Results are not automatically scanned in the background since + /// this could cause the buffer backing the stream to grow too large very quickly. 
This interface provides a mechanism + /// for throttling the throughput of the SCAN call. If this struct is dropped without calling this function the stream will + /// close without an error. + /// + /// If this function returns an error the scan call cannot continue as the client has been closed, or some other fatal error + /// has occurred. If this happens the error will appear in the stream from the original SCAN call. + pub fn next(self) -> Result<(), RedisError> { + if !self.can_continue { + return Ok(()); + } + + let kind = RedisCommandKind::Hscan(self.scan_state); + let cmd = RedisCommand::new(kind, self.args, None); + utils::send_command(&self.inner, cmd) + } + + /// A lightweight function to create a Redis client from the HSCAN result. + /// + /// To continue scanning the caller should call `next` on this struct. Calling `hscan` again on the client will initiate a new HSCAN call starting with a cursor of 0. + pub fn create_client(&self) -> RedisClient { + RedisClient { + inner: self.inner.clone(), + } + } +} + +/// The result of a SCAN operation. +pub struct SScanResult { + pub(crate) results: Option>, + pub(crate) inner: Arc, + pub(crate) args: Vec, + pub(crate) scan_state: ValueScanInner, + pub(crate) can_continue: bool, +} + +impl SScanResult { + /// Read the current cursor from the SSCAN operation. + pub fn cursor(&self) -> &str { + &self.scan_state.cursor + } + + /// Whether or not the scan call will continue returning results. If `false` this will be the last result set returned on the stream. + /// + /// Calling `next` when this returns `false` will return `Ok(())`, so this does not need to be checked on each result. + pub fn has_more(&self) -> bool { + self.can_continue + } + + /// A reference to the results of the SCAN operation. + pub fn results(&self) -> &Option> { + &self.results + } + + /// Take ownership over the results of the SSCAN operation. Calls to `results` or `take_results` will return `None` afterwards. 
+ pub fn take_results(&mut self) -> Option> { + self.results.take() + } + + /// Move on to the next page of results from the SSCAN operation. If no more results are available this may close the stream. + /// + /// **This must be called to continue scanning the keyspace.** Results are not automatically scanned in the background since + /// this could cause the buffer backing the stream to grow too large very quickly. This interface provides a mechanism + /// for throttling the throughput of the SCAN call. If this struct is dropped without calling this function the stream will + /// close without an error. + /// + /// If this function returns an error the scan call cannot continue as the client has been closed, or some other fatal error + /// has occurred. If this happens the error will appear in the stream from the original SCAN call. + pub fn next(self) -> Result<(), RedisError> { + if !self.can_continue { + return Ok(()); + } + + let kind = RedisCommandKind::Sscan(self.scan_state); + let cmd = RedisCommand::new(kind, self.args, None); + utils::send_command(&self.inner, cmd) + } + + /// A lightweight function to create a Redis client from the SSCAN result. + /// + /// To continue scanning the caller should call `next` on this struct. Calling `sscan` again on the client will initiate a new SSCAN call starting with a cursor of 0. + pub fn create_client(&self) -> RedisClient { + RedisClient { + inner: self.inner.clone(), + } + } +} + +/// The result of a SCAN operation. +pub struct ZScanResult { + pub(crate) results: Option>, + pub(crate) inner: Arc, + pub(crate) args: Vec, + pub(crate) scan_state: ValueScanInner, + pub(crate) can_continue: bool, +} + +impl ZScanResult { + /// Read the current cursor from the ZSCAN operation. + pub fn cursor(&self) -> &str { + &self.scan_state.cursor + } + + /// Whether or not the scan call will continue returning results. If `false` this will be the last result set returned on the stream. 
+ /// + /// Calling `next` when this returns `false` will return `Ok(())`, so this does not need to be checked on each result. + pub fn has_more(&self) -> bool { + self.can_continue + } + + /// A reference to the results of the ZSCAN operation. + pub fn results(&self) -> &Option> { + &self.results + } + + /// Take ownership over the results of the ZSCAN operation. Calls to `results` or `take_results` will return `None` afterwards. + pub fn take_results(&mut self) -> Option> { + self.results.take() + } + + /// Move on to the next page of results from the ZSCAN operation. If no more results are available this may close the stream. + /// + /// **This must be called to continue scanning the keyspace.** Results are not automatically scanned in the background since + /// this could cause the buffer backing the stream to grow too large very quickly. This interface provides a mechanism + /// for throttling the throughput of the SCAN call. If this struct is dropped without calling this function the stream will + /// close without an error. + /// + /// If this function returns an error the scan call cannot continue as the client has been closed, or some other fatal error + /// has occurred. If this happens the error will appear in the stream from the original SCAN call. + pub fn next(self) -> Result<(), RedisError> { + if !self.can_continue { + return Ok(()); + } + + let kind = RedisCommandKind::Zscan(self.scan_state); + let cmd = RedisCommand::new(kind, self.args, None); + utils::send_command(&self.inner, cmd) + } + + /// A lightweight function to create a Redis client from the ZSCAN result. + /// + /// To continue scanning the caller should call `next` on this struct. Calling `zscan` again on the client will initiate a new ZSCAN call starting with a cursor of 0. 
+ pub fn create_client(&self) -> RedisClient { + RedisClient { + inner: self.inner.clone(), + } + } +} diff --git a/src/types/sorted_sets.rs b/src/types/sorted_sets.rs new file mode 100644 index 00000000..ceba075c --- /dev/null +++ b/src/types/sorted_sets.rs @@ -0,0 +1,350 @@ +use crate::error::{RedisError, RedisErrorKind}; +use crate::types::RedisValue; +use crate::utils; +use bytes_utils::Str; +use std::collections::VecDeque; +use std::convert::{TryFrom, TryInto}; +use std::iter::FromIterator; + +/// Convenience struct for `ZINTERSTORE` and `ZUNIONSTORE` when accepting 1 or more `weights` arguments. +pub struct MultipleWeights { + values: Vec, +} + +impl MultipleWeights { + pub fn new() -> MultipleWeights { + MultipleWeights { values: Vec::new() } + } + + pub fn inner(self) -> Vec { + self.values + } + + pub fn len(&self) -> usize { + self.values.len() + } +} + +impl From> for MultipleWeights { + fn from(d: Option) -> Self { + match d { + Some(w) => w.into(), + None => MultipleWeights::new(), + } + } +} + +impl From for MultipleWeights { + fn from(d: f64) -> Self { + MultipleWeights { values: vec![d] } + } +} + +impl FromIterator for MultipleWeights { + fn from_iter>(iter: I) -> Self { + MultipleWeights { + values: iter.into_iter().collect(), + } + } +} + +impl From> for MultipleWeights { + fn from(d: Vec) -> Self { + MultipleWeights { values: d } + } +} + +impl From> for MultipleWeights { + fn from(d: VecDeque) -> Self { + MultipleWeights { + values: d.into_iter().collect(), + } + } +} + +/// Convenience struct for the `ZADD` command to accept 1 or more `(score, value)` arguments. 
+pub struct MultipleZaddValues { + values: Vec<(f64, RedisValue)>, +} + +impl MultipleZaddValues { + pub fn new() -> MultipleZaddValues { + MultipleZaddValues { values: Vec::new() } + } + + pub fn inner(self) -> Vec<(f64, RedisValue)> { + self.values + } + + pub fn len(&self) -> usize { + self.values.len() + } +} + +impl TryFrom<(f64, T)> for MultipleZaddValues +where + T: TryInto, + T::Error: Into, +{ + type Error = RedisError; + + fn try_from((f, d): (f64, T)) -> Result { + Ok(MultipleZaddValues { + values: vec![(f, to!(d)?)], + }) + } +} + +impl FromIterator<(f64, T)> for MultipleZaddValues +where + T: Into, +{ + fn from_iter>(iter: I) -> Self { + MultipleZaddValues { + values: iter.into_iter().map(|(f, d)| (f, d.into())).collect(), + } + } +} + +impl TryFrom> for MultipleZaddValues +where + T: TryInto, + T::Error: Into, +{ + type Error = RedisError; + + fn try_from(d: Vec<(f64, T)>) -> Result { + let mut values = Vec::with_capacity(d.len()); + for (f, v) in d.into_iter() { + values.push((f, to!(v)?)); + } + + Ok(MultipleZaddValues { values }) + } +} + +impl TryFrom> for MultipleZaddValues +where + T: TryInto, + T::Error: Into, +{ + type Error = RedisError; + + fn try_from(d: VecDeque<(f64, T)>) -> Result { + let mut values = Vec::with_capacity(d.len()); + for (f, v) in d.into_iter() { + values.push((f, to!(v)?)); + } + + Ok(MultipleZaddValues { values }) + } +} + +/// Ordering options for the ZADD (and related) commands. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum Ordering { + GreaterThan, + LessThan, +} + +impl Ordering { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + Ordering::GreaterThan => "GT", + Ordering::LessThan => "LT", + }) + } +} + +/// Options for the ZRANGE (and related) commands. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ZSort { + ByScore, + ByLex, +} + +impl ZSort { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ZSort::ByScore => "BYSCORE", + ZSort::ByLex => "BYLEX", + }) + } +} + +/// An index, score, lexicographical, or +|-|+inf|-inf range bound for the ZRANGE command. +#[derive(Clone, Debug)] +pub enum ZRangeBound { + /// Index ranges () + Index(i64), + /// Score ranges () + Score(f64), + /// Lexicographical ranges () + Lex(String), + /// Shortcut for the `+` character. + InfiniteLex, + /// Shortcut for the `-` character. + NegInfinityLex, + /// Shortcut for the `+inf` range bound. + InfiniteScore, + /// Shortcut for the `-inf` range bound. + NegInfiniteScore, +} + +impl From for ZRangeBound { + fn from(i: i64) -> Self { + ZRangeBound::Index(i) + } +} + +impl<'a> From<&'a str> for ZRangeBound { + fn from(s: &'a str) -> Self { + if s == "+inf" { + ZRangeBound::InfiniteScore + } else if s == "-inf" { + ZRangeBound::NegInfiniteScore + } else { + ZRangeBound::Lex(s.to_owned()) + } + } +} + +impl From for ZRangeBound { + fn from(s: String) -> Self { + if s == "+inf" { + ZRangeBound::InfiniteScore + } else if s == "-inf" { + ZRangeBound::NegInfiniteScore + } else { + ZRangeBound::Lex(s) + } + } +} + +impl<'a> From<&'a String> for ZRangeBound { + fn from(s: &'a String) -> Self { + s.as_str().into() + } +} + +impl TryFrom for ZRangeBound { + type Error = RedisError; + + fn try_from(f: f64) -> Result { + let value = if f.is_infinite() && f.is_sign_negative() { + ZRangeBound::NegInfiniteScore + } else if f.is_infinite() { + ZRangeBound::InfiniteScore + } else if f.is_nan() { + return Err(RedisError::new( + RedisErrorKind::Unknown, + "Cannot use NaN as zrange field.", + )); + } else { + ZRangeBound::Score(f) + }; + + Ok(value) + } +} + +/// The type of range interval bound. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ZRangeKind { + Inclusive, + Exclusive, +} + +impl Default for ZRangeKind { + fn default() -> Self { + ZRangeKind::Inclusive + } +} + +/// A wrapper struct for a range bound in a sorted set command. +#[derive(Clone, Debug)] +pub struct ZRange { + pub kind: ZRangeKind, + pub range: ZRangeBound, +} + +impl ZRange { + pub(crate) fn into_value(self) -> Result { + let value = if self.kind == ZRangeKind::Exclusive { + match self.range { + ZRangeBound::Index(i) => format!("({}", i).into(), + ZRangeBound::Score(f) => utils::f64_to_zrange_bound(f, &self.kind)?.into(), + ZRangeBound::Lex(s) => utils::check_lex_str(s, &self.kind).into(), + ZRangeBound::InfiniteLex => RedisValue::from_static_str("+"), + ZRangeBound::NegInfinityLex => RedisValue::from_static_str("-"), + ZRangeBound::InfiniteScore => RedisValue::from_static_str("+inf"), + ZRangeBound::NegInfiniteScore => RedisValue::from_static_str("-inf"), + } + } else { + match self.range { + ZRangeBound::Index(i) => i.into(), + ZRangeBound::Score(f) => f.try_into()?, + ZRangeBound::Lex(s) => utils::check_lex_str(s, &self.kind).into(), + ZRangeBound::InfiniteLex => RedisValue::from_static_str("+"), + ZRangeBound::NegInfinityLex => RedisValue::from_static_str("-"), + ZRangeBound::InfiniteScore => RedisValue::from_static_str("+inf"), + ZRangeBound::NegInfiniteScore => RedisValue::from_static_str("-inf"), + } + }; + + Ok(value) + } +} + +impl From for ZRange { + fn from(i: i64) -> Self { + ZRange { + kind: ZRangeKind::default(), + range: i.into(), + } + } +} + +impl<'a> From<&'a str> for ZRange { + fn from(s: &'a str) -> Self { + ZRange { + kind: ZRangeKind::default(), + range: s.into(), + } + } +} + +impl From for ZRange { + fn from(s: String) -> Self { + ZRange { + kind: ZRangeKind::default(), + range: s.into(), + } + } +} + +impl<'a> From<&'a String> for ZRange { + fn from(s: &'a String) -> Self { + ZRange { + kind: ZRangeKind::default(), + range: s.as_str().into(), + } + } +} + 
+impl TryFrom for ZRange { + type Error = RedisError; + + fn try_from(f: f64) -> Result { + Ok(ZRange { + kind: ZRangeKind::default(), + range: f.try_into()?, + }) + } +} + +impl<'a> From<&'a ZRange> for ZRange { + fn from(range: &'a ZRange) -> Self { + range.clone() + } +} diff --git a/src/types/streams.rs b/src/types/streams.rs new file mode 100644 index 00000000..65a232fa --- /dev/null +++ b/src/types/streams.rs @@ -0,0 +1,486 @@ +use crate::commands::{MAXLEN, MINID}; +use crate::error::{RedisError, RedisErrorKind}; +use crate::types::{LimitCount, RedisKey, RedisValue, StringOrNumber}; +use crate::utils; +use bytes_utils::Str; +use std::collections::{HashMap, VecDeque}; +use std::convert::{TryFrom, TryInto}; + +/// Representation for the "=" or "~" operator in `XADD`, etc. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum XCapTrim { + Exact, + AlmostExact, +} + +impl XCapTrim { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + XCapTrim::Exact => "=", + XCapTrim::AlmostExact => "~", + }) + } +} + +impl<'a> TryFrom<&'a str> for XCapTrim { + type Error = RedisError; + + fn try_from(s: &'a str) -> Result { + Ok(match s.as_ref() { + "=" => XCapTrim::Exact, + "~" => XCapTrim::AlmostExact, + _ => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "Invalid XADD trim value.", + )) + } + }) + } +} + +/// One or more ordered key-value pairs, typically used as an argument for `XADD`. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub struct MultipleOrderedPairs { + values: Vec<(RedisKey, RedisValue)>, +} + +impl MultipleOrderedPairs { + pub fn len(&self) -> usize { + self.values.len() + } + + pub fn inner(self) -> Vec<(RedisKey, RedisValue)> { + self.values + } +} + +impl From<()> for MultipleOrderedPairs { + fn from(_: ()) -> Self { + MultipleOrderedPairs { values: Vec::new() } + } +} + +impl TryFrom<(K, V)> for MultipleOrderedPairs +where + K: Into, + V: TryInto, + V::Error: Into, +{ + type Error = RedisError; + + fn try_from((key, value): (K, V)) -> Result { + Ok(MultipleOrderedPairs { + values: vec![(key.into(), to!(value)?)], + }) + } +} + +impl TryFrom> for MultipleOrderedPairs +where + K: Into, + V: TryInto, + V::Error: Into, +{ + type Error = RedisError; + + fn try_from(values: Vec<(K, V)>) -> Result { + Ok(MultipleOrderedPairs { + values: values + .into_iter() + .map(|(key, value)| Ok((key.into(), to!(value)?))) + .collect::, RedisError>>()?, + }) + } +} + +impl TryFrom> for MultipleOrderedPairs +where + K: Into, + V: TryInto, + V::Error: Into, +{ + type Error = RedisError; + + fn try_from(values: VecDeque<(K, V)>) -> Result { + Ok(MultipleOrderedPairs { + values: values + .into_iter() + .map(|(key, value)| Ok((key.into(), to!(value)?))) + .collect::, RedisError>>()?, + }) + } +} + +/// One or more IDs for elements in a stream. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub struct MultipleIDs { + inner: Vec, +} + +impl MultipleIDs { + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn inner(self) -> Vec { + self.inner + } +} + +impl From for MultipleIDs +where + T: Into, +{ + fn from(value: T) -> Self { + MultipleIDs { + inner: vec![value.into()], + } + } +} + +impl From> for MultipleIDs +where + T: Into, +{ + fn from(value: Vec) -> Self { + MultipleIDs { + inner: value.into_iter().map(|value| value.into()).collect(), + } + } +} + +impl From> for MultipleIDs +where + T: Into, +{ + fn from(value: VecDeque) -> Self { + MultipleIDs { + inner: value.into_iter().map(|value| value.into()).collect(), + } + } +} + +/// The MAXLEN or MINID argument for a stream cap. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum XCapKind { + MaxLen, + MinID, +} + +impl XCapKind { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + XCapKind::MaxLen => MAXLEN, + XCapKind::MinID => MINID, + }) + } +} + +impl<'a> TryFrom<&'a str> for XCapKind { + type Error = RedisError; + + fn try_from(value: &'a str) -> Result { + Ok(match value.as_ref() { + "MAXLEN" => XCapKind::MaxLen, + "MINID" => XCapKind::MinID, + _ => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "Expected MAXLEN or MINID,", + )) + } + }) + } +} + +/// Stream cap arguments for `XADD`, `XTRIM`, etc. +/// +/// Equivalent to `[MAXLEN|MINID [=|~] threshold [LIMIT count]]`. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub struct XCap { + inner: Option<(XCapKind, XCapTrim, StringOrNumber, LimitCount)>, +} + +impl XCap { + pub(crate) fn into_parts(self) -> Option<(XCapKind, XCapTrim, StringOrNumber, LimitCount)> { + self.inner + } +} + +impl From> for XCap { + fn from(_: Option<()>) -> Self { + XCap { inner: None } + } +} + +impl TryFrom<(K, T, S, Option)> for XCap +where + K: TryInto, + K::Error: Into, + T: TryInto, + T::Error: Into, + S: Into, +{ + type Error = RedisError; + + fn try_from((kind, trim, threshold, limit): (K, T, S, Option)) -> Result { + let (kind, trim) = (to!(kind)?, to!(trim)?); + Ok(XCap { + inner: Some((kind, trim, threshold.into(), limit)), + }) + } +} + +impl TryFrom<(K, T, S)> for XCap +where + K: TryInto, + K::Error: Into, + T: TryInto, + T::Error: Into, + S: Into, +{ + type Error = RedisError; + + fn try_from((kind, trim, threshold): (K, T, S)) -> Result { + let (kind, trim) = (to!(kind)?, to!(trim)?); + Ok(XCap { + inner: Some((kind, trim, threshold.into(), None)), + }) + } +} + +impl TryFrom<(K, S)> for XCap +where + K: TryInto, + K::Error: Into, + S: Into, +{ + type Error = RedisError; + + fn try_from((kind, threshold): (K, S)) -> Result { + let kind = to!(kind)?; + Ok(XCap { + inner: Some((kind, XCapTrim::Exact, threshold.into(), None)), + }) + } +} + +/// Stream ID arguments for `XADD`, `XREAD`, etc. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum XID { + /// The auto-generated key symbol "*". + Auto, + /// An ID specified by the user such as "12345-0". + Manual(Str), + /// The highest ID in a stream ("$"). + Max, + /// For `XREADGROUP`, only return new IDs (">"). 
+ NewInGroup, +} + +impl XID { + pub(crate) fn into_str(self) -> Str { + match self { + XID::Auto => utils::static_str("*"), + XID::Max => utils::static_str("$"), + XID::NewInGroup => utils::static_str(">"), + XID::Manual(s) => s.into(), + } + } +} + +impl<'a> From<&'a str> for XID { + fn from(value: &'a str) -> Self { + match value.as_ref() { + "*" => XID::Auto, + "$" => XID::Max, + ">" => XID::NewInGroup, + _ => XID::Manual(value.into()), + } + } +} + +impl<'a> From<&'a String> for XID { + fn from(value: &'a String) -> Self { + match value.as_ref() { + "*" => XID::Auto, + "$" => XID::Max, + ">" => XID::NewInGroup, + _ => XID::Manual(value.into()), + } + } +} + +impl From for XID { + fn from(value: String) -> Self { + match value.as_ref() { + "*" => XID::Auto, + "$" => XID::Max, + ">" => XID::NewInGroup, + _ => XID::Manual(value.into()), + } + } +} + +impl From for XID { + fn from(value: Str) -> Self { + match &*value { + "*" => XID::Auto, + "$" => XID::Max, + ">" => XID::NewInGroup, + _ => XID::Manual(value), + } + } +} + +/// A struct representing the trailing optional arguments to [XPENDING](https://redis.io/commands/xpending). +/// +/// See the `From` implementations for various shorthand representations of these arguments. Callers should use `()` to represent no arguments. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub struct XPendingArgs { + pub idle: Option, + pub start: Option, + pub end: Option, + pub count: Option, + pub consumer: Option, +} + +impl XPendingArgs { + pub(crate) fn into_parts(self) -> Result, XID, XID, u64, Option)>, RedisError> { + let is_empty = self.idle.is_none() + && self.start.is_none() + && self.end.is_none() + && self.count.is_none() + && self.consumer.is_none(); + + if is_empty { + Ok(None) + } else { + let start = match self.start { + Some(s) => s, + None => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "The `start` argument is required in this context.", + )) + } + }; + let end = match self.end { + Some(s) => s, + None => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "The `end` argument is required in this context.", + )) + } + }; + let count = match self.count { + Some(s) => s, + None => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "The `count` argument is required in this context.", + )) + } + }; + + Ok(Some((self.idle, start, end, count, self.consumer))) + } + } +} + +impl From<()> for XPendingArgs { + fn from(_: ()) -> Self { + XPendingArgs { + idle: None, + start: None, + end: None, + count: None, + consumer: None, + } + } +} + +impl From<(S, E, u64)> for XPendingArgs +where + S: Into, + E: Into, +{ + fn from((start, end, count): (S, E, u64)) -> Self { + XPendingArgs { + idle: None, + start: Some(start.into()), + end: Some(end.into()), + count: Some(count), + consumer: None, + } + } +} + +impl From<(S, E, u64, C)> for XPendingArgs +where + S: Into, + E: Into, + C: Into, +{ + fn from((start, end, count, consumer): (S, E, u64, C)) -> Self { + XPendingArgs { + idle: None, + start: Some(start.into()), + end: Some(end.into()), + count: Some(count), + consumer: Some(consumer.into()), + } + } +} + +impl From<(u64, S, E, u64)> for XPendingArgs +where + S: Into, + E: Into, +{ + fn from((idle, start, end, count): (u64, S, E, u64)) -> Self { 
+ XPendingArgs { + idle: Some(idle), + start: Some(start.into()), + end: Some(end.into()), + count: Some(count), + consumer: None, + } + } +} + +impl From<(u64, S, E, u64, C)> for XPendingArgs +where + S: Into, + E: Into, + C: Into, +{ + fn from((idle, start, end, count, consumer): (u64, S, E, u64, C)) -> Self { + XPendingArgs { + idle: Some(idle), + start: Some(start.into()), + end: Some(end.into()), + count: Some(count), + consumer: Some(consumer.into()), + } + } +} + +/// A generic helper type describing the ID and associated map for each record in a stream. +/// +/// See the [XReadResponse](crate::types::XReadResponse) type for more information. +pub type XReadValue = (I, HashMap); +/// A generic helper type describing the top level response from `XREAD` or `XREADGROUP`. +/// +/// See the [xread](crate::interfaces::StreamsInterface::xread) documentation for more information. +/// +/// The inner type declarations refer to the following: +/// * K1 - The type of the outer Redis key for the stream. Usually a `String` or `RedisKey`. +/// * I - The type of the ID for a stream record ("abc-123"). This is usually a `String`. +/// * K2 - The type of key in the map associated with each stream record. +/// * V - The type of value in the map associated with each stream record. +/// +/// To support heterogeneous values in the map describing each stream element it is recommended to declare the last type as `RedisValue` and [convert](crate::types::RedisValue::convert) as needed. 
+pub type XReadResponse = HashMap>>; diff --git a/src/utils.rs b/src/utils.rs index 0499bbf6..2e03ff8b 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -5,16 +5,17 @@ use crate::multiplexer::utils as multiplexer_utils; use crate::multiplexer::{sentinel, ConnectionIDs}; use crate::protocol::types::{RedisCommand, RedisCommandKind}; use crate::types::*; +use bytes::Bytes; +use bytes_utils::Str; use float_cmp::approx_eq; use futures::future::{select, Either}; use futures::{pin_mut, Future}; use parking_lot::RwLock; use rand::distributions::Alphanumeric; use rand::{self, Rng}; -use redis_protocol::resp2::types::Frame as ProtocolFrame; +use redis_protocol::resp3::types::Frame as Resp3Frame; use std::collections::HashMap; use std::convert::TryInto; -use std::hash::Hasher; use std::ops::DerefMut; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; @@ -24,20 +25,28 @@ use tokio::sync::oneshot::{channel as oneshot_channel, Receiver as OneshotReceiv use tokio::sync::RwLock as AsyncRwLock; use tokio::time::sleep; -#[cfg(feature = "index-map")] -use indexmap::map::IndexMap; -#[cfg(feature = "index-map")] -use std::hash::Hash; - #[cfg(any(feature = "full-tracing", feature = "partial-tracing"))] use crate::protocol::utils as protocol_utils; #[cfg(any(feature = "full-tracing", feature = "partial-tracing"))] use crate::trace; #[cfg(any(feature = "full-tracing", feature = "partial-tracing"))] use futures::TryFutureExt; +#[cfg(feature = "serde-json")] +use serde_json::Value; #[cfg(any(feature = "full-tracing", feature = "partial-tracing"))] use tracing_futures::Instrument; +/// Create a `Str` from a static str slice without copying. +pub fn static_str(s: &'static str) -> Str { + // it's already parsed as a string + unsafe { Str::from_inner_unchecked(Bytes::from_static(s.as_bytes())) } +} + +/// Create a `Bytes` from static bytes without copying. 
+pub fn static_bytes(b: &'static [u8]) -> Bytes { + Bytes::from_static(b) +} + pub fn is_clustered(config: &RwLock) -> bool { config.read().server.is_clustered() } @@ -80,9 +89,9 @@ pub fn redis_string_to_f64(s: &str) -> Result { /// Convert an `f64` to a redis string, supporting "+inf" and "-inf". pub fn f64_to_redis_string(d: f64) -> Result { if d.is_infinite() && d.is_sign_negative() { - Ok("-inf".into()) + Ok(RedisValue::from_static_str("-inf")) } else if d.is_infinite() { - Ok("+inf".into()) + Ok(RedisValue::from_static_str("+inf")) } else if d.is_nan() { Err(RedisError::new( RedisErrorKind::InvalidArgument, @@ -115,7 +124,7 @@ pub fn incr_with_max(curr: u32, max: u32) -> Option { if max != 0 && curr >= max { None } else { - Some(curr + 1) + Some(curr.saturating_add(1)) } } @@ -194,6 +203,14 @@ pub fn check_and_set_client_state( } } +/// Check and set the inner locked value by updating `locked` with `new_value`, returning the old value. +pub fn check_and_set_bool(locked: &RwLock, new_value: bool) -> bool { + let mut guard = locked.write(); + let old_value = *guard; + *guard = new_value; + old_value +} + pub fn read_centralized_server(inner: &Arc) -> Option> { match inner.config.read().server { ServerConfig::Centralized { ref host, ref port } => Some(Arc::new(format!("{}:{}", host, port))), @@ -202,43 +219,6 @@ pub fn read_centralized_server(inner: &Arc) -> Option IndexMap { - if capacity == 0 { - IndexMap::new() - } else { - IndexMap::with_capacity(capacity) - } -} - -#[cfg(not(feature = "index-map"))] -pub fn new_map(capacity: usize) -> HashMap { - if capacity == 0 { - HashMap::new() - } else { - HashMap::with_capacity(capacity) - } -} - -#[cfg(feature = "index-map")] -pub fn hash_map(data: &IndexMap, state: &mut H) -where - H: Hasher, -{ - for (key, value) in data.iter() { - key.hash(state); - value.hash(state); - } -} - -#[cfg(not(feature = "index-map"))] -pub fn hash_map(_data: &HashMap, _state: &mut H) -where - H: Hasher, -{ - panic!("Cannot use HashMap 
as hash key."); -} - pub fn decr_atomic(size: &Arc) -> usize { size.fetch_sub(1, Ordering::AcqRel).saturating_sub(1) } @@ -409,7 +389,7 @@ pub fn send_command(inner: &Arc, command: RedisCommand) -> Res decr_atomic(&inner.cmd_buffer_len); if let Some(tx) = e.0.tx.take() { if let Err(_) = tx.send(Err(RedisError::new(RedisErrorKind::Unknown, "Failed to send command."))) { - _error!(inner, "Failed to send command {:?}.", e.0.extract_key()); + _error!(inner, "Failed to send command to multiplexer {:?}.", e.0.kind); } } } @@ -437,10 +417,10 @@ where } async fn wait_for_response( - rx: OneshotReceiver>, -) -> Result { - let sleep_duration = globals().default_command_timeout(); - apply_timeout(rx, sleep_duration as u64).await? + rx: OneshotReceiver>, + timeout: u64, +) -> Result { + apply_timeout(rx, timeout).await? } fn has_blocking_error_policy(inner: &Arc) -> bool { @@ -473,7 +453,7 @@ pub async fn interrupt_blocked_connection( } }; - backchannel_request_response(inner, move || { + backchannel_request_response(inner, true, move || { Ok(( RedisCommandKind::ClientUnblock, vec![connection_id.into(), flag.to_str().into()], @@ -506,7 +486,7 @@ async fn check_blocking_policy(inner: &Arc, command: &RedisCom Ok(()) } -pub async fn basic_request_response(inner: &Arc, func: F) -> Result +pub async fn basic_request_response(inner: &Arc, func: F) -> Result where F: FnOnce() -> Result<(RedisCommandKind, Vec), RedisError>, { @@ -518,11 +498,11 @@ where let _ = disallow_nested_values(&command)?; let _ = send_command(&inner, command)?; - wait_for_response(rx).await + wait_for_response(rx, inner.perf_config.default_command_timeout() as u64).await } #[cfg(any(feature = "full-tracing", feature = "partial-tracing"))] -pub async fn request_response(inner: &Arc, func: F) -> Result +pub async fn request_response(inner: &Arc, func: F) -> Result where F: FnOnce() -> Result<(RedisCommandKind, Vec), RedisError>, { @@ -547,9 +527,6 @@ where let _ = disallow_nested_values(&command)?; (command, rx, 
req_size) }; - if let Some(key) = command.extract_key() { - cmd_span.record("key", &&*key); - } cmd_span.record("cmd", &command.kind.to_str_debug()); cmd_span.record("req_size", &req_size); @@ -559,7 +536,7 @@ where let _ = check_blocking_policy(inner, &command).await?; let _ = send_command(&inner, command)?; - wait_for_response(rx) + wait_for_response(rx, inner.perf_config.default_command_timeout() as u64) .and_then(|frame| async move { trace::record_response_size(&end_cmd_span, &frame); Ok::<_, RedisError>(frame) @@ -569,7 +546,7 @@ where } #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] -pub async fn request_response(inner: &Arc, func: F) -> Result +pub async fn request_response(inner: &Arc, func: F) -> Result where F: FnOnce() -> Result<(RedisCommandKind, Vec), RedisError>, { @@ -645,8 +622,9 @@ fn find_backchannel_server(inner: &Arc, command: &RedisCommand pub async fn backchannel_request_response( inner: &Arc, + use_blocked: bool, func: F, -) -> Result +) -> Result where F: FnOnce() -> Result<(RedisCommandKind, Vec), RedisError>, { @@ -669,7 +647,7 @@ where .backchannel .write() .await - .request_response(inner, blocked_server, command) + .request_response(inner, blocked_server, command, use_blocked) .await } else { // otherwise no connections are blocked @@ -685,7 +663,7 @@ where .backchannel .write() .await - .request_response(inner, &server, command) + .request_response(inner, &server, command, use_blocked) .await } } @@ -718,7 +696,6 @@ pub async fn read_connection_ids(inner: &Arc) -> Option) -> Result<(Vec<(String, u16)>, String), RedisError> { match inner.config.read().server { #[cfg(not(feature = "sentinel-auth"))] @@ -852,3 +829,153 @@ where Ok(out) } + +pub fn add_jitter(delay: u64, jitter: u32) -> u64 { + delay.saturating_add(rand::thread_rng().gen_range(0..jitter as u64)) +} + +pub fn into_redis_map(mut iter: I) -> Result, RedisError> +where + I: Iterator, + K: TryInto, + K::Error: Into, + V: TryInto, + V::Error: Into, +{ + 
let (lower, upper) = iter.size_hint(); + let capacity = if let Some(upper) = upper { upper } else { lower }; + let mut out = HashMap::with_capacity(capacity); + + while let Some((key, value)) = iter.next() { + out.insert(to!(key)?, to!(value)?); + } + Ok(out) +} + +#[cfg(feature = "serde-json")] +pub fn parse_nested_json(s: &str) -> Option { + let trimmed = s.trim(); + let is_maybe_json = + (trimmed.starts_with("{") && trimmed.ends_with("}")) || (trimmed.starts_with("[") && trimmed.ends_with("]")); + + if is_maybe_json { + serde_json::from_str(s).ok() + } else { + None + } +} + +pub fn flatten_nested_array_values(value: RedisValue, depth: usize) -> RedisValue { + if depth == 0 { + return value; + } + + match value { + RedisValue::Array(values) => { + let inner_size = values.iter().fold(0, |s, v| s + v.array_len().unwrap_or(1)); + let mut out = Vec::with_capacity(inner_size); + + for value in values.into_iter() { + match value { + RedisValue::Array(inner) => { + for value in inner.into_iter() { + out.push(flatten_nested_array_values(value, depth - 1)); + } + } + _ => out.push(value), + } + } + RedisValue::Array(out) + } + RedisValue::Map(values) => { + let mut out = HashMap::with_capacity(values.len()); + + for (key, value) in values.inner().into_iter() { + let value = if value.is_array() { + flatten_nested_array_values(value, depth - 1) + } else { + value + }; + + out.insert(key, value); + } + RedisValue::Map(RedisMap { inner: out }) + } + _ => value, + } +} + +pub fn is_maybe_array_map(arr: &Vec) -> bool { + if arr.len() > 0 && arr.len() % 2 == 0 { + arr.chunks(2).fold(true, |b, chunk| b && !chunk[0].is_aggregate_type()) + } else { + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::error::RedisError; + use crate::types::RedisValue; + use std::convert::TryInto; + use std::fmt::Debug; + + fn m(v: V) -> RedisValue + where + V: TryInto + Debug, + V::Error: Into + Debug, + { + v.try_into().unwrap() + } + + fn a(v: Vec) -> RedisValue { + 
RedisValue::Array(v) + } + + #[test] + fn should_flatten_xread_example() { + // 127.0.0.1:6379> xread count 2 streams foo bar 1643479648480-0 1643479834990-0 + // 1) 1) "foo" + // 2) 1) 1) "1643479650336-0" + // 2) 1) "count" + // 2) "3" + // 2) 1) "bar" + // 2) 1) 1) "1643479837746-0" + // 2) 1) "count" + // 2) "5" + // 2) 1) "1643479925582-0" + // 2) 1) "count" + // 2) "6" + let actual: RedisValue = vec![ + a(vec![ + m("foo"), + a(vec![a(vec![m("1643479650336-0"), a(vec![m("count"), m(3)])])]), + ]), + a(vec![ + m("bar"), + a(vec![ + a(vec![m("1643479837746-0"), a(vec![m("count"), m(5)])]), + a(vec![m("1643479925582-0"), a(vec![m("count"), m(6)])]), + ]), + ]), + ] + .into_iter() + .collect(); + + // flatten the top level nested array into something that can be cast to a map + let expected: RedisValue = vec![ + m("foo"), + a(vec![a(vec![m("1643479650336-0"), a(vec![m("count"), m(3)])])]), + m("bar"), + a(vec![ + a(vec![m("1643479837746-0"), a(vec![m("count"), m(5)])]), + a(vec![m("1643479925582-0"), a(vec![m("count"), m(6)])]), + ]), + ] + .into_iter() + .collect(); + + assert_eq!(flatten_nested_array_values(actual, 1), expected); + } +} diff --git a/tests/README.md b/tests/README.md index 504fda9b..2ba15640 100644 --- a/tests/README.md +++ b/tests/README.md @@ -2,10 +2,28 @@ Tests are organized by category, similar to the [commands](../src/commands) folder. -By default, most tests run 4 times based on the following configuration parameters: clustered vs centralized servers and pipelined vs non-pipelined clients. Helper macros exist to make this easy so each test only has to be written once. +By default, most tests run 8 times based on the following configuration parameters: clustered vs centralized servers, pipelined vs non-pipelined clients, and RESP2 vs RESP3 mode. Helper macros exist to make this easy so each test only has to be written once. 
**The tests require Redis version >=6.2** +## Makefile + +The easiest way to set up the tests is to use the [Makefile](../Makefile): + +``` +# setup +make clean +make install + +# run tests +make test-default-features +make test-all-features +make test-no-features +make test-sentinel-features +``` + +The make commands are thin wrappers around scripts in the [scripts](scripts) and [runners](runners) folders. The rest of this document will cover how to use those scripts by hand. + ## Installation The [environ](environ) file contains any environment variables that might be needed. **This should be loaded before installing or running any tests, unless otherwise set manually.** @@ -17,7 +35,6 @@ In order to run the installation scripts the following must be installed: * OpenSSL (`libssl-dev`) for testing TLS features without vendored OpenSSL dependencies * `docker` * `docker-compose` (this may come with `docker` depending on the version you use) -* `expect` ### Fresh Installation @@ -54,6 +71,15 @@ See the [CI configuration](../.circleci/config.yml) for more information. Note: the [stop redis script](scripts/stop_all_redis.sh) can stop all local Redis servers, including those started via docker. +There are 4 environment variables that can be used to control the host/port for the centralized or clustered servers used for the tests. The default values can be found in the [environ](./environ) file. + +* FRED_REDIS_CLUSTER_HOST +* FRED_REDIS_CLUSTER_PORT +* FRED_REDIS_CENTRALIZED_HOST +* FRED_REDIS_CENTRALIZED_PORT + +Callers can change these, but need to ensure the ACL rules are properly configured on the servers. A user with the name `$REDIS_USERNAME` and password `$REDIS_PASSWORD` needs full access to run any command. The installation scripts will automatically set these rules, but if callers use a different server they may need to manually create this user. + ## Adding Tests Adding tests is straightforward with the help of some macros and utility functions. 
@@ -66,7 +92,7 @@ Note: When writing tests that operate on multiple keys be sure to use a [hash_ta 4. Call the test from the appropriate [integration/cluster.rs](integration/cluster.rs) or [integration/centralized.rs](integration/centralized.rs) files, or both. Create a wrapping `mod` block with the same name as the test's folder if necessary. 5. Use `centralized_test!` or `cluster_test!` to generate tests in the appropriate module. Centralized tests will be automatically converted to sentinel tests if using the sentinel testing features. -Tests that use this pattern will run 4 times to check the functionality against a clustered and centralized redis servers using both pipelined and non-pipelined clients. +Tests that use this pattern will run 8 times to check the functionality against clustered and centralized redis servers using both pipelined and non-pipelined clients in RESP2 and RESP3 mode. ## Chaos Monkey @@ -92,5 +118,4 @@ docker container rm $(docker ps -aq) The following modules still need better test coverage: * ACL commands -* Cluster commands. This one is more complicated though since many of these modify the cluster. -* Client commands \ No newline at end of file +* Cluster commands. This one is more complicated though since many of these modify the cluster.
\ No newline at end of file diff --git a/tests/chaos_monkey/mod.rs b/tests/chaos_monkey/mod.rs index ac37d61a..cc847c82 100644 --- a/tests/chaos_monkey/mod.rs +++ b/tests/chaos_monkey/mod.rs @@ -2,10 +2,11 @@ #![allow(dead_code)] #![allow(unused_variables)] -use fred::client::RedisClient; +use fred::clients::RedisClient; use fred::error::{RedisError, RedisErrorKind}; use fred::globals; -use fred::types::{RedisConfig, RedisKey, ServerConfig}; +use fred::interfaces::*; +use fred::types::{PerformanceConfig, RedisConfig, RedisKey, ServerConfig}; use lazy_static::lazy_static; use parking_lot::RwLock; use std::env; @@ -79,14 +80,17 @@ fn env_vars() -> Vec<(OsString, OsString)> { async fn read_foo_src_and_dest() -> Result<(u16, u16), RedisError> { let config = RedisConfig { server: ServerConfig::default_clustered(), - pipeline: false, + performance: PerformanceConfig { + pipeline: false, + ..Default::default() + }, ..Default::default() }; let client = RedisClient::new(config); let _ = client.connect(None); let _ = client.wait_for_connect().await?; - let foo = RedisKey::new("foo"); + let foo = RedisKey::from_static_str("foo"); let owner = match foo.cluster_owner(&client) { Some(server) => server.split(":").skip(1).next().unwrap().parse::()?, None => return Err(RedisError::new(RedisErrorKind::Unknown, "Failed to find owner")), diff --git a/tests/environ b/tests/environ index d4cbb635..ea031f25 100644 --- a/tests/environ +++ b/tests/environ @@ -1,6 +1,22 @@ +#!/bin/bash export RUST_BACKTRACE=full \ REDIS_VERSION=6.2.2 \ REDIS_USERNAME=foo \ REDIS_PASSWORD=bar \ - REDIS_SENTINEL_PASSWORD=baz \ No newline at end of file + REDIS_SENTINEL_PASSWORD=baz \ + FRED_REDIS_CLUSTER_HOST=127.0.0.1 \ + FRED_REDIS_CLUSTER_PORT=30001 \ + FRED_REDIS_CENTRALIZED_HOST=127.0.0.1 \ + FRED_REDIS_CENTRALIZED_PORT=6379 + +alias fred_redis_cli='./tests/tmp/redis_$REDIS_VERSION/redis-$REDIS_VERSION/src/redis-cli "$@"' + +if [ -z "$CIRCLECI_TESTS" ]; then + read -p "Use cargo nextest? 
[y/n]: " NEXTEST + if [ "$NEXTEST" = "y" ]; then + export FRED_CI_NEXTEST=1 + else + unset FRED_CI_NEXTEST + fi +fi \ No newline at end of file diff --git a/tests/integration/acl/mod.rs b/tests/integration/acl/mod.rs index d2e5ebbe..95e52cbf 100644 --- a/tests/integration/acl/mod.rs +++ b/tests/integration/acl/mod.rs @@ -1,7 +1,8 @@ -use fred::client::RedisClient; -use fred::error::RedisError; -use fred::types::RedisConfig; use super::utils::read_env_var; +use fred::clients::RedisClient; +use fred::error::RedisError; +use fred::interfaces::*; +use fred::types::{AclUserFlag, RedisConfig}; // the docker image we use for sentinel tests doesn't allow for configuring users, just passwords, // so for the tests here we just use an empty username so it uses the `default` user @@ -43,4 +44,11 @@ pub async fn should_auth_as_test_user_via_config(_: RedisClient, mut config: Red } Ok(()) -} \ No newline at end of file +} + +pub async fn should_run_acl_getuser(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let user = client.acl_getuser("default").await?.unwrap(); + assert!(user.flags.contains(&AclUserFlag::On)); + + Ok(()) +} diff --git a/tests/integration/centralized.rs b/tests/integration/centralized.rs index 0eeb1e57..168faad8 100644 --- a/tests/integration/centralized.rs +++ b/tests/integration/centralized.rs @@ -27,7 +27,6 @@ mod multi { centralized_test!(multi, should_run_get_set_trx); centralized_test_panic!(multi, should_run_error_get_set_trx); - centralized_test_panic!(multi, should_fail_with_blocking_cmd); } mod other { @@ -38,12 +37,13 @@ mod other { centralized_test!(other, should_automatically_unblock); centralized_test!(other, should_manually_unblock); centralized_test!(other, should_error_when_blocked); + centralized_test!(other, should_smoke_test_from_redis_impl); + centralized_test!(other, should_safely_change_protocols_repeatedly); } mod pool { centralized_test!(pool, should_connect_and_ping_static_pool_single_conn); centralized_test!(pool, 
should_connect_and_ping_static_pool_two_conn); - centralized_test!(pool, should_connect_and_ping_dynamic_pool); #[cfg(feature = "fd-tests")] centralized_test!(pool, should_connect_and_ping_static_pool_many_conn); #[cfg(feature = "fd-tests")] @@ -220,4 +220,45 @@ pub mod geo { pub mod acl { centralized_test!(acl, should_auth_as_test_user); centralized_test!(acl, should_auth_as_test_user_via_config); + centralized_test!(acl, should_run_acl_getuser); +} + +mod streams { + centralized_test!(streams, should_xinfo_consumers); + centralized_test!(streams, should_xinfo_groups); + centralized_test!(streams, should_xinfo_streams); + centralized_test!(streams, should_xadd_auto_id_to_a_stream); + centralized_test!(streams, should_xadd_manual_id_to_a_stream); + centralized_test!(streams, should_xadd_with_cap_to_a_stream); + centralized_test!(streams, should_xadd_nomkstream_to_a_stream); + centralized_test!(streams, should_xtrim_a_stream_approx_cap); + centralized_test!(streams, should_xtrim_a_stream_eq_cap); + centralized_test!(streams, should_xdel_one_id_in_a_stream); + centralized_test!(streams, should_xdel_multiple_ids_in_a_stream); + centralized_test!(streams, should_xrange_no_count); + centralized_test!(streams, should_xrange_with_count); + centralized_test!(streams, should_xrange_values_no_count); + centralized_test!(streams, should_xrevrange_no_count); + centralized_test!(streams, should_xrevrange_with_count); + centralized_test!(streams, should_xrevrange_values_no_count); + centralized_test!(streams, should_run_xlen_on_stream); + centralized_test!(streams, should_xread_one_key_count_1); + centralized_test!(streams, should_xread_map_one_key); + centralized_test!(streams, should_xread_multiple_keys_count_2); + centralized_test!(streams, should_xread_with_blocking); + centralized_test!(streams, should_xgroup_create_no_mkstream); + centralized_test!(streams, should_xgroup_create_mkstream); + centralized_test!(streams, should_xgroup_createconsumer); + 
centralized_test!(streams, should_xgroup_delconsumer); + centralized_test!(streams, should_xgroup_destroy); + centralized_test!(streams, should_xgroup_setid); + centralized_test!(streams, should_xreadgroup_one_stream); + centralized_test!(streams, should_xreadgroup_multiple_stream); + centralized_test!(streams, should_xreadgroup_block); + centralized_test!(streams, should_xack_one_id); + centralized_test!(streams, should_xack_multiple_ids); + centralized_test!(streams, should_xclaim_one_id); + centralized_test!(streams, should_xclaim_multiple_ids); + centralized_test!(streams, should_xclaim_with_justid); + centralized_test!(streams, should_xautoclaim_default); } diff --git a/tests/integration/clustered.rs b/tests/integration/clustered.rs index 2b787ba8..d4d9789f 100644 --- a/tests/integration/clustered.rs +++ b/tests/integration/clustered.rs @@ -28,7 +28,6 @@ mod multi { cluster_test!(multi, should_run_get_set_trx); cluster_test_panic!(multi, should_fail_with_hashslot_error); cluster_test_panic!(multi, should_run_error_get_set_trx); - cluster_test_panic!(multi, should_fail_with_blocking_cmd); cluster_test!(multi, should_use_cluster_slot_with_publish); } @@ -42,12 +41,12 @@ mod other { cluster_test!(other, should_automatically_unblock); cluster_test!(other, should_manually_unblock); cluster_test!(other, should_error_when_blocked); + cluster_test!(other, should_safely_change_protocols_repeatedly); } mod pool { cluster_test!(pool, should_connect_and_ping_static_pool_single_conn); cluster_test!(pool, should_connect_and_ping_static_pool_two_conn); - cluster_test!(pool, should_connect_and_ping_dynamic_pool); #[cfg(feature = "fd-tests")] cluster_test!(pool, should_connect_and_ping_static_pool_many_conn); #[cfg(feature = "fd-tests")] @@ -223,3 +222,47 @@ pub mod geo { cluster_test!(geo, should_georadiusbymember_values); cluster_test!(geo, should_geosearch_values); } + +pub mod acl { + cluster_test!(acl, should_run_acl_getuser); +} + +mod streams { + cluster_test!(streams, 
should_xinfo_consumers); + cluster_test!(streams, should_xinfo_groups); + cluster_test!(streams, should_xinfo_streams); + cluster_test!(streams, should_xadd_auto_id_to_a_stream); + cluster_test!(streams, should_xadd_manual_id_to_a_stream); + cluster_test!(streams, should_xadd_with_cap_to_a_stream); + cluster_test!(streams, should_xadd_nomkstream_to_a_stream); + cluster_test!(streams, should_xtrim_a_stream_approx_cap); + cluster_test!(streams, should_xtrim_a_stream_eq_cap); + cluster_test!(streams, should_xdel_one_id_in_a_stream); + cluster_test!(streams, should_xdel_multiple_ids_in_a_stream); + cluster_test!(streams, should_xrange_no_count); + cluster_test!(streams, should_xrange_with_count); + cluster_test!(streams, should_xrange_values_no_count); + cluster_test!(streams, should_xrevrange_no_count); + cluster_test!(streams, should_xrevrange_with_count); + cluster_test!(streams, should_xrevrange_values_no_count); + cluster_test!(streams, should_run_xlen_on_stream); + cluster_test!(streams, should_xread_one_key_count_1); + cluster_test!(streams, should_xread_multiple_keys_count_2); + cluster_test!(streams, should_xread_with_blocking); + cluster_test!(streams, should_xread_map_one_key); + cluster_test!(streams, should_xgroup_create_no_mkstream); + cluster_test!(streams, should_xgroup_create_mkstream); + cluster_test!(streams, should_xgroup_createconsumer); + cluster_test!(streams, should_xgroup_delconsumer); + cluster_test!(streams, should_xgroup_destroy); + cluster_test!(streams, should_xgroup_setid); + cluster_test!(streams, should_xreadgroup_one_stream); + cluster_test!(streams, should_xreadgroup_multiple_stream); + cluster_test!(streams, should_xreadgroup_block); + cluster_test!(streams, should_xack_one_id); + cluster_test!(streams, should_xack_multiple_ids); + cluster_test!(streams, should_xclaim_one_id); + cluster_test!(streams, should_xclaim_multiple_ids); + cluster_test!(streams, should_xclaim_with_justid); + cluster_test!(streams, should_xautoclaim_default); 
+} diff --git a/tests/integration/geo/mod.rs b/tests/integration/geo/mod.rs index 372dfa54..dd18dc79 100644 --- a/tests/integration/geo/mod.rs +++ b/tests/integration/geo/mod.rs @@ -1,4 +1,5 @@ use fred::prelude::*; +use fred::types::{GeoPosition, GeoRadiusInfo, GeoUnit, GeoValue, SortOrder}; use std::convert::TryInto; fn loose_eq(lhs: f64, rhs: f64, precision: u32) -> bool { diff --git a/tests/integration/hashes/mod.rs b/tests/integration/hashes/mod.rs index cf6ec45d..b3a8a4c8 100644 --- a/tests/integration/hashes/mod.rs +++ b/tests/integration/hashes/mod.rs @@ -1,7 +1,7 @@ -use fred::client::RedisClient; +use fred::clients::RedisClient; use fred::error::RedisError; -use fred::types::{RedisConfig, RedisMap, RedisValue}; - +use fred::interfaces::*; +use fred::types::{RedisConfig, RedisValue}; use std::collections::{HashMap, HashSet}; fn assert_contains<'a, T: Eq + PartialEq>(values: Vec, item: &'a T) { @@ -36,9 +36,9 @@ fn assert_diff_len(values: Vec<&'static str>, value: RedisValue, len: usize) { pub async fn should_hset_and_hget(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { check_null!(client, "foo"); - let result: i64 = client.hset("foo", ("a", 1.into())).await?; + let result: i64 = client.hset("foo", ("a", 1)).await?; assert_eq!(result, 1); - let result: i64 = client.hset("foo", vec![("b", 2.into()), ("c", 3.into())]).await?; + let result: i64 = client.hset("foo", vec![("b", 2), ("c", 3)]).await?; assert_eq!(result, 2); let a: i64 = client.hget("foo", "a").await?; @@ -54,9 +54,7 @@ pub async fn should_hset_and_hget(client: RedisClient, _: RedisConfig) -> Result pub async fn should_hset_and_hdel(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { check_null!(client, "foo"); - let result: i64 = client - .hset("foo", vec![("a", 1.into()), ("b", 2.into()), ("c", 3.into())]) - .await?; + let result: i64 = client.hset("foo", vec![("a", 1.into()), ("b", 2), ("c", 3)]).await?; assert_eq!(result, 3); let result: i64 = client.hdel("foo", 
vec!["a", "b"]).await?; assert_eq!(result, 2); @@ -71,7 +69,7 @@ pub async fn should_hset_and_hdel(client: RedisClient, _: RedisConfig) -> Result pub async fn should_hexists(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { check_null!(client, "foo"); - let _: () = client.hset("foo", ("a", 1.into())).await?; + let _: () = client.hset("foo", ("a", 1)).await?; let a: bool = client.hexists("foo", "a").await?; assert!(a); let b: bool = client.hexists("foo", "b").await?; @@ -83,9 +81,7 @@ pub async fn should_hexists(client: RedisClient, _: RedisConfig) -> Result<(), R pub async fn should_hgetall(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { check_null!(client, "foo"); - let _: () = client - .hset("foo", vec![("a", 1.into()), ("b", 2.into()), ("c", 3.into())]) - .await?; + let _: () = client.hset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let values: HashMap = client.hgetall("foo").await?; assert_eq!(values.len(), 3); @@ -123,9 +119,7 @@ pub async fn should_hincryby_float(client: RedisClient, _: RedisConfig) -> Resul pub async fn should_get_keys(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { check_null!(client, "foo"); - let _: () = client - .hset("foo", vec![("a", 1.into()), ("b", 2.into()), ("c", 3.into())]) - .await?; + let _: () = client.hset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let keys = client.hkeys("foo").await?; assert_diff_len(vec!["a", "b", "c"], keys, 0); @@ -136,9 +130,7 @@ pub async fn should_get_keys(client: RedisClient, _: RedisConfig) -> Result<(), pub async fn should_hmset(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { check_null!(client, "foo"); - let _: () = client - .hmset("foo", vec![("a", 1.into()), ("b", 2.into()), ("c", 3.into())]) - .await?; + let _: () = client.hmset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let a: i64 = client.hget("foo", "a").await?; assert_eq!(a, 1); @@ -153,9 +145,7 @@ pub async fn should_hmset(client: RedisClient, _: 
RedisConfig) -> Result<(), Red pub async fn should_hmget(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { check_null!(client, "foo"); - let _: () = client - .hmset("foo", vec![("a", 1.into()), ("b", 2.into()), ("c", 3.into())]) - .await?; + let _: () = client.hmset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let result: Vec = client.hmget("foo", vec!["a", "b"]).await?; assert_eq!(result, vec![1, 2]); @@ -166,7 +156,7 @@ pub async fn should_hmget(client: RedisClient, _: RedisConfig) -> Result<(), Red pub async fn should_hsetnx(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { check_null!(client, "foo"); - let _: () = client.hset("foo", ("a", 1.into())).await?; + let _: () = client.hset("foo", ("a", 1)).await?; let result: bool = client.hsetnx("foo", "a", 2).await?; assert_eq!(result, false); let result: i64 = client.hget("foo", "a").await?; @@ -182,9 +172,7 @@ pub async fn should_hsetnx(client: RedisClient, _: RedisConfig) -> Result<(), Re pub async fn should_get_random_field(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { check_null!(client, "foo"); - let _: () = client - .hmset("foo", vec![("a", 1.into()), ("b", 2.into()), ("c", 3.into())]) - .await?; + let _: () = client.hmset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let field: String = client.hrandfield("foo", None).await?; assert_contains(vec!["a", "b", "c"], &field.as_str()); @@ -195,13 +183,13 @@ pub async fn should_get_random_field(client: RedisClient, _: RedisConfig) -> Res let actual: HashMap = client.hrandfield("foo", Some((2, true))).await?; assert_eq!(actual.len(), 2); - let mut expected = RedisMap::new(); - expected.insert("a".into(), "1".into()); - expected.insert("b".into(), "2".into()); - expected.insert("c".into(), "3".into()); + let mut expected: HashMap = HashMap::new(); + expected.insert("a".into(), 1); + expected.insert("b".into(), 2); + expected.insert("c".into(), 3); for (key, value) in actual.into_iter() { - let expected_val: 
i64 = expected.get(&key).unwrap().clone().convert()?; + let expected_val: i64 = *expected.get(&key).unwrap(); assert_eq!(value, expected_val); } @@ -212,7 +200,7 @@ pub async fn should_get_strlen(client: RedisClient, _: RedisConfig) -> Result<() check_null!(client, "foo"); let expected = "abcdefhijklmnopqrstuvwxyz"; - let _: () = client.hset("foo", ("a", expected.clone().into())).await?; + let _: () = client.hset("foo", ("a", expected)).await?; let len: usize = client.hstrlen("foo", "a").await?; assert_eq!(len, expected.len()); @@ -223,7 +211,7 @@ pub async fn should_get_strlen(client: RedisClient, _: RedisConfig) -> Result<() pub async fn should_get_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { check_null!(client, "foo"); - let _: () = client.hmset("foo", vec![("a", "1".into()), ("b", "2".into())]).await?; + let _: () = client.hmset("foo", vec![("a", "1"), ("b", "2")]).await?; let values: RedisValue = client.hvals("foo").await?; assert_diff_len(vec!["1", "2"], values, 0); diff --git a/tests/integration/keys/mod.rs b/tests/integration/keys/mod.rs index 06a57d7c..f50b492e 100644 --- a/tests/integration/keys/mod.rs +++ b/tests/integration/keys/mod.rs @@ -1,8 +1,8 @@ -use fred::client::RedisClient; +use fred::clients::RedisClient; use fred::error::RedisError; -use fred::pool::StaticRedisPool; -use fred::prelude::Expiration; -use fred::types::{ReconnectPolicy, RedisConfig, RedisMap, RedisValue}; +use fred::interfaces::*; +use fred::pool::RedisPool; +use fred::types::{Expiration, ReconnectPolicy, RedisConfig, RedisMap, RedisValue}; use futures::pin_mut; use futures::StreamExt; use std::collections::HashMap; @@ -294,7 +294,7 @@ pub async fn should_get_keys_from_pool_in_a_stream( ) -> Result<(), RedisError> { let _ = client.set("foo", "bar", None, None, false).await?; - let pool = StaticRedisPool::new(config, 5)?; + let pool = RedisPool::new(config, 5)?; let _ = pool.connect(Some(ReconnectPolicy::default())); let _ = 
pool.wait_for_connect().await?; diff --git a/tests/integration/lists/mod.rs b/tests/integration/lists/mod.rs index 4f78fed4..d63c6364 100644 --- a/tests/integration/lists/mod.rs +++ b/tests/integration/lists/mod.rs @@ -1,4 +1,6 @@ +use fred::interfaces::*; use fred::prelude::*; +use fred::types::{LMoveDirection, ListLocation}; use std::time::Duration; use tokio::time::sleep; @@ -238,7 +240,7 @@ pub async fn should_lrange_values(client: RedisClient, _: RedisConfig) -> Result for idx in 0..COUNT { let result: i64 = client.lrange("foo", idx, idx).await?; - assert_eq!(result, idx.into()); + assert_eq!(result, idx); } Ok(()) diff --git a/tests/integration/lua/mod.rs b/tests/integration/lua/mod.rs index 81c4c82c..4795f315 100644 --- a/tests/integration/lua/mod.rs +++ b/tests/integration/lua/mod.rs @@ -1,5 +1,5 @@ -use fred::client::util; use fred::prelude::*; +use fred::util; static ECHO_SCRIPT: &'static str = "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}"; static GET_SCRIPT: &'static str = "return redis.call('get', KEYS[1])"; @@ -51,11 +51,11 @@ pub async fn should_evalsha_get_script(client: RedisClient, _: RedisConfig) -> R let hash = load_script(&client, GET_SCRIPT).await?; assert_eq!(hash, script_hash); - let result: Option = client.evalsha(&script_hash, vec!["foo"], ()).await?; + let result: Option = client.evalsha(&script_hash, vec!["foo"], None).await?; assert!(result.is_none()); let _: () = client.set("foo", "bar", None, None, false).await?; - let result: String = client.evalsha(&script_hash, vec!["foo"], ()).await?; + let result: String = client.evalsha(&script_hash, vec!["foo"], None).await?; assert_eq!(result, "bar"); let _ = flush_scripts(&client).await?; @@ -73,18 +73,18 @@ pub async fn should_eval_echo_script(client: RedisClient, _: RedisConfig) -> Res } pub async fn should_eval_get_script(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let result: Option = client.eval(GET_SCRIPT, vec!["foo"], ()).await?; + let result: Option = 
client.eval(GET_SCRIPT, vec!["foo"], None).await?; assert!(result.is_none()); let hash = util::sha1_hash(GET_SCRIPT); - let result: Option = client.evalsha(&hash, vec!["foo"], ()).await?; + let result: Option = client.evalsha(&hash, vec!["foo"], None).await?; assert!(result.is_none()); let _: () = client.set("foo", "bar", None, None, false).await?; - let result: String = client.eval(GET_SCRIPT, vec!["foo"], ()).await?; + let result: String = client.eval(GET_SCRIPT, vec!["foo"], None).await?; assert_eq!(result, "bar"); - let result: String = client.evalsha(&hash, vec!["foo"], ()).await?; + let result: String = client.evalsha(&hash, vec!["foo"], None).await?; assert_eq!(result, "bar"); let _ = flush_scripts(&client).await?; diff --git a/tests/integration/mod.rs b/tests/integration/mod.rs index 70ba8ebd..6ed16696 100644 --- a/tests/integration/mod.rs +++ b/tests/integration/mod.rs @@ -20,6 +20,21 @@ mod server; mod sets; mod slowlog; mod sorted_sets; +mod streams; pub mod centralized; pub mod clustered; + +mod macro_tests { + use fred::{b, s}; + + #[test] + fn should_use_static_str_macro() { + let _s = s!("foo"); + } + + #[test] + fn should_use_static_bytes_macro() { + let _b = b!(b"foo"); + } +} diff --git a/tests/integration/multi/mod.rs b/tests/integration/multi/mod.rs index abe3ac45..075ac913 100644 --- a/tests/integration/multi/mod.rs +++ b/tests/integration/multi/mod.rs @@ -1,5 +1,6 @@ -use fred::client::RedisClient; +use fred::clients::RedisClient; use fred::error::RedisError; +use fred::interfaces::*; use fred::types::{RedisConfig, RedisValue}; pub async fn should_run_get_set_trx(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { @@ -32,14 +33,6 @@ pub async fn should_fail_with_hashslot_error(client: RedisClient, _config: Redis Ok(()) } -pub async fn should_fail_with_blocking_cmd(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let trx = client.multi(true).await?; - let _: () = client.blpop("foo", 100.0).await?; - let _: 
() = trx.exec().await?; - - Ok(()) -} - pub async fn should_use_cluster_slot_with_publish(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let trx = client.multi(true).await?; diff --git a/tests/integration/other/mod.rs b/tests/integration/other/mod.rs index d629266f..50c00d89 100644 --- a/tests/integration/other/mod.rs +++ b/tests/integration/other/mod.rs @@ -1,11 +1,59 @@ -use fred::client::RedisClient; +use fred::clients::RedisClient; use fred::error::{RedisError, RedisErrorKind}; -use fred::prelude::Blocking; -use fred::types::{ClientUnblockFlag, RedisConfig, ServerConfig}; -use std::collections::BTreeSet; +use fred::interfaces::*; +use fred::prelude::{Blocking, RedisValue}; +use fred::types::{ClientUnblockFlag, RedisConfig, RedisKey, RedisMap, ServerConfig}; +use parking_lot::RwLock; +use redis_protocol::resp3::types::RespVersion; +use std::collections::HashMap; +use std::collections::{BTreeMap, BTreeSet}; +use std::convert::TryInto; +use std::mem; +use std::sync::Arc; use std::time::Duration; use tokio::time::sleep; +fn hash_to_btree(vals: &RedisMap) -> BTreeMap { + vals + .iter() + .map(|(key, value)| (key.clone(), value.as_u64().unwrap() as u16)) + .collect() +} + +fn array_to_set(vals: Vec) -> BTreeSet { + vals.into_iter().collect() +} + +pub async fn should_smoke_test_from_redis_impl(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let nested_values: RedisMap = vec![("a", 1), ("b", 2)].try_into()?; + let _ = client.set("foo", "123", None, None, false).await?; + let _ = client.set("baz", "456", None, None, false).await?; + let _ = client.hset("bar", &nested_values).await?; + + let foo: usize = client.get("foo").await?; + assert_eq!(foo, 123); + let foo: i64 = client.get("foo").await?; + assert_eq!(foo, 123); + let foo: String = client.get("foo").await?; + assert_eq!(foo, "123"); + let foo: Vec = client.get("foo").await?; + assert_eq!(foo, "123".as_bytes()); + let foo: Vec = client.hvals("bar").await?; + 
assert_eq!(array_to_set(foo), array_to_set(vec!["1".to_owned(), "2".to_owned()])); + let foo: BTreeSet = client.hvals("bar").await?; + assert_eq!(foo, array_to_set(vec!["1".to_owned(), "2".to_owned()])); + let foo: HashMap = client.hgetall("bar").await?; + assert_eq!(foo, RedisValue::Map(nested_values.clone()).convert()?); + let foo: BTreeMap = client.hgetall("bar").await?; + assert_eq!(foo, hash_to_btree(&nested_values)); + let foo: (String, i64) = client.mget(vec!["foo", "baz"]).await?; + assert_eq!(foo, ("123".into(), 456)); + let foo: Vec<(String, i64)> = client.hgetall("bar").await?; + assert_eq!(array_to_set(foo), array_to_set(vec![("a".into(), 1), ("b".into(), 2)])); + + Ok(()) +} + pub async fn should_automatically_unblock(_: RedisClient, mut config: RedisConfig) -> Result<(), RedisError> { config.blocking = Blocking::Interrupt; let client = RedisClient::new(config); @@ -130,3 +178,38 @@ pub async fn should_run_flushall_cluster(client: RedisClient, _: RedisConfig) -> Ok(()) } + +pub async fn should_safely_change_protocols_repeatedly( + client: RedisClient, + _: RedisConfig, +) -> Result<(), RedisError> { + let done = Arc::new(RwLock::new(false)); + let other = client.clone(); + let other_done = done.clone(); + + let jh = tokio::spawn(async move { + loop { + if *other_done.read() { + return Ok::<_, RedisError>(()); + } + + let _ = other.incr("foo").await?; + sleep(Duration::from_millis(10)).await; + } + }); + + // switch protocols every half second + for idx in 0..20 { + let version = if idx % 2 == 0 { + RespVersion::RESP2 + } else { + RespVersion::RESP3 + }; + let _ = client.hello(version, None).await?; + sleep(Duration::from_millis(500)).await; + } + let _ = mem::replace(&mut *done.write(), true); + + let _ = jh.await?; + Ok(()) +} diff --git a/tests/integration/pool/mod.rs b/tests/integration/pool/mod.rs index ecaa8aa7..30399e71 100644 --- a/tests/integration/pool/mod.rs +++ b/tests/integration/pool/mod.rs @@ -1,13 +1,14 @@ -use fred::client::RedisClient; 
+use fred::clients::RedisClient; use fred::error::RedisError; -use fred::pool::{DynamicRedisPool, StaticRedisPool}; +use fred::interfaces::*; +use fred::pool::RedisPool; use fred::types::RedisConfig; #[cfg(feature = "fd-tests")] use std::time::Duration; async fn create_and_ping_pool(config: &RedisConfig, count: usize) -> Result<(), RedisError> { - let pool = StaticRedisPool::new(config.clone(), count)?; + let pool = RedisPool::new(config.clone(), count)?; let _ = pool.connect(None); let _ = pool.wait_for_connect().await?; @@ -20,19 +21,6 @@ async fn create_and_ping_pool(config: &RedisConfig, count: usize) -> Result<(), Ok(()) } -async fn create_and_ping_dynamic_pool(config: &RedisConfig, count: usize) -> Result<(), RedisError> { - let pool = DynamicRedisPool::new(config.clone(), None, count, count * 2); - let _ = pool.connect().await; - let _ = pool.wait_for_connect().await?; - - for client in pool.clients().into_iter() { - let _ = client.ping().await?; - } - - let _ = pool.quit_pool().await; - Ok(()) -} - pub async fn should_connect_and_ping_static_pool_single_conn( _: RedisClient, config: RedisConfig, @@ -47,10 +35,6 @@ pub async fn should_connect_and_ping_static_pool_two_conn( create_and_ping_pool(&config, 2).await } -pub async fn should_connect_and_ping_dynamic_pool(_: RedisClient, config: RedisConfig) -> Result<(), RedisError> { - create_and_ping_dynamic_pool(&config, 5).await -} - // this may require increasing the number of allowed file descriptors #[cfg(feature = "fd-tests")] pub async fn should_connect_and_ping_static_pool_many_conn( diff --git a/tests/integration/pubsub/mod.rs b/tests/integration/pubsub/mod.rs index 824aaf62..c9860b35 100644 --- a/tests/integration/pubsub/mod.rs +++ b/tests/integration/pubsub/mod.rs @@ -2,6 +2,7 @@ use fred::prelude::*; use futures::StreamExt; use std::time::Duration; use tokio::time::sleep; +use fred::interfaces::PubsubInterface; const CHANNEL1: &'static str = "foo"; const CHANNEL2: &'static str = "bar"; diff --git 
a/tests/integration/scanning/mod.rs b/tests/integration/scanning/mod.rs index ec293f12..8d4a3689 100644 --- a/tests/integration/scanning/mod.rs +++ b/tests/integration/scanning/mod.rs @@ -36,7 +36,7 @@ pub async fn should_scan_keyspace(client: RedisClient, _: RedisConfig) -> Result pub async fn should_hscan_hash(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { for idx in 0..SCAN_KEYS { - let value = (format!("bar-{}", idx), idx.into()); + let value = (format!("bar-{}", idx), idx); let _ = client.hset("foo", value).await?; } @@ -48,7 +48,7 @@ pub async fn should_hscan_hash(client: RedisClient, _: RedisConfig) -> Result<() // scanning wont return results in any particular order, so we just check the format of the key for (key, _) in results.iter() { - let parts: Vec<&str> = key.as_str().split("-").collect(); + let parts: Vec<&str> = key.as_str().unwrap().split("-").collect(); assert!(parts[1].parse::().is_ok()); } } else { diff --git a/tests/integration/sorted_sets/mod.rs b/tests/integration/sorted_sets/mod.rs index 78ecabdf..7faeec7d 100644 --- a/tests/integration/sorted_sets/mod.rs +++ b/tests/integration/sorted_sets/mod.rs @@ -1,5 +1,6 @@ use float_cmp::approx_eq; use fred::prelude::*; +use fred::types::{Ordering, ZRange, ZRangeKind, ZSort}; use std::cmp::Ordering as CmpOrdering; use std::convert::TryInto; use std::time::Duration; @@ -455,14 +456,14 @@ pub async fn should_zrangebylex(client: RedisClient, _: RedisConfig) -> Result<( let expected = create_lex_data(&client, "foo").await?; let expected_values: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); - let old_result = client.zrangebylex("foo", "-", "+", None).await?; + let old_result: RedisValue = client.zrangebylex("foo", "-", "+", None).await?; let new_result = client .zrange("foo", "-", "+", Some(ZSort::ByLex), false, None, false) .await?; assert_eq!(old_result, new_result); assert_eq!(old_result.into_array(), expected_values); - let old_result = client.zrangebylex("foo", "a", "[c", 
None).await?; + let old_result: RedisValue = client.zrangebylex("foo", "a", "[c", None).await?; let new_result = client .zrange("foo", "a", "[c", Some(ZSort::ByLex), false, None, false) .await?; @@ -477,14 +478,14 @@ pub async fn should_zrevrangebylex(client: RedisClient, _: RedisConfig) -> Resul let mut expected_values: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); expected_values.reverse(); - let old_result = client.zrevrangebylex("foo", "+", "-", None).await?; + let old_result: RedisValue = client.zrevrangebylex("foo", "+", "-", None).await?; let new_result = client .zrange("foo", "+", "-", Some(ZSort::ByLex), true, None, false) .await?; assert_eq!(old_result, new_result); assert_eq!(old_result.into_array(), expected_values); - let old_result = client.zrevrangebylex("foo", "c", "[a", None).await?; + let old_result: RedisValue = client.zrevrangebylex("foo", "c", "[a", None).await?; let new_result = client .zrange("foo", "[c", "a", Some(ZSort::ByLex), true, None, false) .await?; @@ -498,14 +499,14 @@ pub async fn should_zrangebyscore(client: RedisClient, _: RedisConfig) -> Result let expected = create_count_data(&client, "foo").await?; let expected_values: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); - let old_result = client.zrangebyscore("foo", "-inf", "+inf", false, None).await?; + let old_result: RedisValue = client.zrangebyscore("foo", "-inf", "+inf", false, None).await?; let new_result = client .zrange("foo", "-inf", "+inf", Some(ZSort::ByScore), false, None, false) .await?; assert_eq!(old_result, new_result); assert_eq!(old_result.into_array(), expected_values); - let old_result = client + let old_result: RedisValue = client .zrangebyscore("foo", (COUNT / 2) as f64, COUNT as f64, false, None) .await?; let new_result = client @@ -530,7 +531,7 @@ pub async fn should_zrangebyscore(client: RedisClient, _: RedisConfig) -> Result kind: ZRangeKind::Inclusive, range: (COUNT as f64).try_into()?, }; - let old_result = 
client.zrangebyscore("foo", &lower, &upper, false, None).await?; + let old_result: RedisValue = client.zrangebyscore("foo", &lower, &upper, false, None).await?; let new_result = client .zrange("foo", &lower, &upper, Some(ZSort::ByScore), false, None, false) .await?; @@ -545,14 +546,14 @@ pub async fn should_zrevrangebyscore(client: RedisClient, _: RedisConfig) -> Res let mut expected_values: Vec = expected.iter().map(|(_, v)| v.clone()).collect(); expected_values.reverse(); - let old_result = client.zrevrangebyscore("foo", "+inf", "-inf", false, None).await?; + let old_result: RedisValue = client.zrevrangebyscore("foo", "+inf", "-inf", false, None).await?; let new_result = client .zrange("foo", "+inf", "-inf", Some(ZSort::ByScore), true, None, false) .await?; assert_eq!(old_result, new_result); assert_eq!(old_result.into_array(), expected_values); - let old_result = client + let old_result: RedisValue = client .zrevrangebyscore("foo", COUNT as f64, (COUNT / 2) as f64, false, None) .await?; let new_result = client @@ -577,7 +578,7 @@ pub async fn should_zrevrangebyscore(client: RedisClient, _: RedisConfig) -> Res kind: ZRangeKind::Inclusive, range: (COUNT as f64).try_into()?, }; - let old_result = client.zrevrangebyscore("foo", &upper, &lower, false, None).await?; + let old_result: RedisValue = client.zrevrangebyscore("foo", &upper, &lower, false, None).await?; let new_result = client .zrange("foo", &upper, &lower, Some(ZSort::ByScore), true, None, false) .await?; @@ -793,8 +794,8 @@ pub async fn should_zmscore_values(client: RedisClient, _: RedisConfig) -> Resul let _: () = client.zadd("foo", None, None, false, false, (idx as f64, idx)).await?; } - let result: Vec = client.zmscore("foo", vec![0, 1]).await?; - assert_eq!(result, vec!["0".into(), "1".into()]); + let result: Vec = client.zmscore("foo", vec![0, 1]).await?; + assert_eq!(result, vec![0.0, 1.0]); let result: Option = client.zmscore("foo", vec![11]).await?; assert!(result.is_none()); diff --git 
a/tests/integration/streams/mod.rs b/tests/integration/streams/mod.rs new file mode 100644 index 00000000..da4ded75 --- /dev/null +++ b/tests/integration/streams/mod.rs @@ -0,0 +1,720 @@ +use fred::prelude::*; +use fred::types::{XCapKind, XCapTrim, XReadResponse, XReadValue, XID}; +use std::collections::HashMap; +use std::hash::Hash; +use std::time::Duration; +use tokio::time::sleep; + +type FakeExpectedValues = Vec>>; + +async fn create_fake_group_and_stream(client: &RedisClient, key: &str) -> Result<(), RedisError> { + client.xgroup_create(key, "group1", "$", true).await +} + +async fn add_stream_entries( + client: &RedisClient, + key: &str, + count: usize, +) -> Result<(Vec, FakeExpectedValues), RedisError> { + let mut ids = Vec::with_capacity(count); + let mut expected = Vec::with_capacity(count); + for idx in 0..count { + let id: String = client.xadd(key, false, None, "*", ("count", idx)).await?; + ids.push(id.clone()); + + let mut outer = HashMap::with_capacity(1); + let mut inner = HashMap::with_capacity(1); + inner.insert("count".into(), idx); + outer.insert(id, inner); + expected.push(outer); + } + + Ok((ids, expected)) +} + +fn has_expected_value(expected: &FakeExpectedValues, actual: &FakeExpectedValues) -> bool { + actual.iter().enumerate().fold(true, |b, (i, v)| b && v == &expected[i]) +} + +pub async fn should_xinfo_consumers(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let result: Result<(), RedisError> = client.xinfo_consumers("foo{1}", "group1").await; + assert!(result.is_err()); + + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let consumers: Vec> = client.xinfo_consumers("foo{1}", "group1").await?; + assert_eq!(consumers.len(), 1); + assert_eq!(consumers[0].get("name"), Some(&"consumer1".to_owned())); + + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; + 
let consumers: Vec> = client.xinfo_consumers("foo{1}", "group1").await?; + assert_eq!(consumers.len(), 2); + assert_eq!(consumers[0].get("name"), Some(&"consumer1".to_owned())); + assert_eq!(consumers[1].get("name"), Some(&"consumer2".to_owned())); + + Ok(()) +} + +pub async fn should_xinfo_groups(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let result: Result<(), RedisError> = client.xinfo_groups("foo{1}").await; + assert!(result.is_err()); + + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let result: Vec> = client.xinfo_groups("foo{1}").await?; + assert_eq!(result.len(), 1); + assert_eq!(result[0].get("name"), Some(&"group1".to_owned())); + + let _: () = client.xgroup_create("foo{1}", "group2", "$", true).await?; + let result: Vec> = client.xinfo_groups("foo{1}").await?; + assert_eq!(result.len(), 2); + assert_eq!(result[0].get("name"), Some(&"group1".to_owned())); + assert_eq!(result[1].get("name"), Some(&"group2".to_owned())); + + Ok(()) +} + +pub async fn should_xinfo_streams(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let result: Result<(), RedisError> = client.xinfo_stream("foo{1}", true, None).await; + assert!(result.is_err()); + + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let mut result: HashMap = client.xinfo_stream("foo{1}", true, None).await?; + assert_eq!(result.len(), 6); + assert_eq!(result.get("length"), Some(&RedisValue::Integer(0))); + + let groups: HashMap = result.remove("groups").unwrap().convert()?; + assert_eq!(groups.get("name"), Some(&RedisValue::from("group1"))); + + Ok(()) +} + +pub async fn should_xadd_auto_id_to_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let result: String = client.xadd("foo{1}", false, None, "*", ("a", "b")).await?; + assert!(!result.is_empty()); + + let len: usize = client.xlen("foo{1}").await?; + 
assert_eq!(len, 1); + Ok(()) +} + +pub async fn should_xadd_manual_id_to_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let result: String = client.xadd("foo{1}", false, None, "1-0", ("a", "b")).await?; + assert_eq!(result, "1-0"); + + let len: usize = client.xlen("foo{1}").await?; + assert_eq!(len, 1); + Ok(()) +} + +pub async fn should_xadd_with_cap_to_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let _: () = client + .xadd("foo{1}", false, ("MAXLEN", "=", 1), "*", ("a", "b")) + .await?; + + let len: usize = client.xlen("foo{1}").await?; + assert_eq!(len, 1); + Ok(()) +} + +pub async fn should_xadd_nomkstream_to_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let result: Option = client.xadd("foo{1}", true, None, "*", ("a", "b")).await?; + assert!(result.is_none()); + + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _: () = client.xadd("foo{1}", true, None, "*", ("a", "b")).await?; + let len: usize = client.xlen("foo{1}").await?; + assert_eq!(len, 1); + Ok(()) +} + +pub async fn should_xtrim_a_stream_approx_cap(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + + let deleted: usize = client.xtrim("foo{1}", ("MAXLEN", "~", 1)).await?; + assert!(deleted < 3); + let len: usize = client.xlen("foo{1}").await?; + assert_eq!(len, 3 - deleted); + + let _ = client.del("foo{1}").await?; + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + let deleted: usize = client + .xtrim("foo{1}", (XCapKind::MaxLen, XCapTrim::AlmostExact, 1)) + .await?; + assert!(deleted < 3); + let len: usize = client.xlen("foo{1}").await?; + 
assert_eq!(len, 3 - deleted); + + Ok(()) +} + +pub async fn should_xtrim_a_stream_eq_cap(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + + let deleted: usize = client.xtrim("foo{1}", ("MAXLEN", "=", 1)).await?; + assert_eq!(deleted, 2); + let len: usize = client.xlen("foo{1}").await?; + assert_eq!(len, 1); + + let _ = client.del("foo{1}").await?; + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + let deleted: usize = client.xtrim("foo{1}", (XCapKind::MaxLen, XCapTrim::Exact, 1)).await?; + assert_eq!(deleted, 2); + let len: usize = client.xlen("foo{1}").await?; + assert_eq!(len, 1); + + Ok(()) +} + +pub async fn should_xdel_one_id_in_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let (ids, _) = add_stream_entries(&client, "foo{1}", 2).await?; + + let deleted: usize = client.xdel("foo{1}", &ids[0]).await?; + assert_eq!(deleted, 1); + let len: usize = client.xlen("foo{1}").await?; + assert_eq!(len, 1); + Ok(()) +} + +pub async fn should_xdel_multiple_ids_in_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let (ids, _) = add_stream_entries(&client, "foo{1}", 3).await?; + + let deleted: usize = client.xdel("foo{1}", ids[0..2].to_vec()).await?; + assert_eq!(deleted, 2); + let len: usize = client.xlen("foo{1}").await?; + assert_eq!(len, 1); + Ok(()) +} + +pub async fn should_xrange_no_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let (_, expected) = 
add_stream_entries(&client, "foo{1}", 3).await?; + + let result: FakeExpectedValues = client.xrange("foo{1}", "-", "+", None).await?; + assert_eq!(result, expected); + Ok(()) +} + +pub async fn should_xrange_values_no_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let (ids, _) = add_stream_entries(&client, "foo{1}", 3).await?; + + let result: Vec> = client.xrange_values("foo{1}", "-", "+", None).await?; + let actual_ids: Vec = result.iter().map(|(id, _)| id.clone()).collect(); + assert_eq!(ids, actual_ids); + Ok(()) +} + +pub async fn should_xrevrange_values_no_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let (mut ids, _) = add_stream_entries(&client, "foo{1}", 3).await?; + ids.reverse(); + + let result: Vec> = client.xrevrange_values("foo{1}", "+", "-", None).await?; + let actual_ids: Vec = result.iter().map(|(id, _)| id.clone()).collect(); + assert_eq!(ids, actual_ids); + Ok(()) +} + +pub async fn should_xrange_with_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let (_, expected) = add_stream_entries(&client, "foo{1}", 3).await?; + + let result: FakeExpectedValues = client.xrange("foo{1}", "-", "+", Some(1)).await?; + assert!(has_expected_value(&expected, &result)); + Ok(()) +} + +pub async fn should_xrevrange_no_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let (_, mut expected) = add_stream_entries(&client, "foo{1}", 3).await?; + expected.reverse(); + + let result: FakeExpectedValues = client.xrevrange("foo{1}", "+", "-", None).await?; + assert_eq!(result, expected); 
+ Ok(()) +} + +pub async fn should_xrevrange_with_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let (_, mut expected) = add_stream_entries(&client, "foo{1}", 3).await?; + expected.reverse(); + + let result: FakeExpectedValues = client.xrevrange("foo{1}", "-", "+", Some(1)).await?; + assert!(has_expected_value(&expected, &result)); + Ok(()) +} + +pub async fn should_run_xlen_on_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + check_null!(client, "foo{1}"); + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let len: usize = client.xlen("foo{1}").await?; + assert_eq!(len, 0); + + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + let len: usize = client.xlen("foo{1}").await?; + assert_eq!(len, 3); + Ok(()) +} + +pub async fn should_xread_map_one_key(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + + let result: XReadResponse = client.xread_map(None, None, "foo{1}", "0").await?; + + for (idx, (_, record)) in result.get("foo{1}").unwrap().into_iter().enumerate() { + let count = record.get("count").expect("Failed to read count"); + assert_eq!(*count, idx); + } + + Ok(()) +} + +pub async fn should_xread_one_key_count_1(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let (mut ids, mut expected) = add_stream_entries(&client, "foo{1}", 3).await?; + let _ = ids.pop().unwrap(); + let most_recent_expected = expected.pop().unwrap(); + let second_recent_id = ids.pop().unwrap(); + + let mut expected = HashMap::new(); + expected.insert("foo{1}".into(), vec![most_recent_expected]); + + let result: HashMap>>> = client + .xread::(Some(1), None, "foo{1}", second_recent_id) + .await? 
+ .flatten_array_values(1) + .convert()?; + assert_eq!(result, expected); + + Ok(()) +} + +pub async fn should_xread_multiple_keys_count_2(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = create_fake_group_and_stream(&client, "bar{1}").await?; + let (foo_ids, foo_inner) = add_stream_entries(&client, "foo{1}", 3).await?; + let (bar_ids, bar_inner) = add_stream_entries(&client, "bar{1}", 3).await?; + + let mut expected = HashMap::new(); + expected.insert("foo{1}".into(), foo_inner[1..].to_vec()); + expected.insert("bar{1}".into(), bar_inner[1..].to_vec()); + + let ids: Vec = vec![foo_ids[0].as_str().into(), bar_ids[0].as_str().into()]; + let result: HashMap>>> = client + .xread::(Some(2), None, vec!["foo{1}", "bar{1}"], ids) + .await? + .flatten_array_values(1) + .convert()?; + assert_eq!(result, expected); + + Ok(()) +} + +pub async fn should_xread_with_blocking(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let expected_id = "123456789-0"; + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + + let mut expected = HashMap::new(); + let mut inner = HashMap::new(); + let mut fields = HashMap::new(); + fields.insert("count".into(), 100); + inner.insert(expected_id.into(), fields); + expected.insert("foo{1}".into(), vec![inner]); + + let add_client = client.clone_new(); + tokio::spawn(async move { + let _ = add_client.connect(None); + let _ = add_client.wait_for_connect().await?; + sleep(Duration::from_millis(500)).await; + + let _: () = add_client + .xadd("foo{1}", false, None, expected_id, ("count", 100)) + .await?; + let _ = add_client.quit().await?; + Ok::<(), RedisError>(()) + }); + + let result: HashMap>>> = client + .xread::(None, Some(5000), "foo{1}", XID::Max) + .await? 
+ .flatten_array_values(1) + .convert()?; + assert_eq!(result, expected); + + Ok(()) +} + +pub async fn should_xgroup_create_no_mkstream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let result: Result = client.xgroup_create("foo{1}", "group1", "$", false).await; + assert!(result.is_err()); + let _: () = client.xadd("foo{1}", false, None, "*", ("count", 1)).await?; + let _: () = client.xgroup_create("foo{1}", "group1", "$", false).await?; + + Ok(()) +} + +pub async fn should_xgroup_create_mkstream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _: () = client.xgroup_create("foo{1}", "group1", "$", true).await?; + let len: usize = client.xlen("foo{1}").await?; + assert_eq!(len, 0); + + Ok(()) +} + +pub async fn should_xgroup_createconsumer(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let len: usize = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + assert_eq!(len, 1); + + let consumers: Vec> = client.xinfo_consumers("foo{1}", "group1").await?; + assert_eq!(consumers[0].get("name").unwrap(), &RedisValue::from("consumer1")); + assert_eq!(consumers[0].get("pending").unwrap(), &RedisValue::from(0)); + + Ok(()) +} + +pub async fn should_xgroup_delconsumer(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let len: usize = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + assert_eq!(len, 1); + + let len: usize = client.xgroup_delconsumer("foo{1}", "group1", "consumer1").await?; + assert_eq!(len, 0); + + let consumers: Vec> = client.xinfo_consumers("foo{1}", "group1").await?; + assert!(consumers.is_empty()); + Ok(()) +} + +pub async fn should_xgroup_destroy(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let len: usize = 
client.xgroup_destroy("foo{1}", "group1").await?; + assert_eq!(len, 1); + + Ok(()) +} + +pub async fn should_xgroup_setid(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _: () = client.xgroup_setid("foo{1}", "group1", "12345-0").await?; + + Ok(()) +} + +pub async fn should_xreadgroup_one_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + + let result: XReadResponse = client + .xreadgroup_map("group1", "consumer1", None, None, false, "foo{1}", ">") + .await?; + + assert_eq!(result.len(), 1); + for (idx, (_, record)) in result.get("foo{1}").unwrap().into_iter().enumerate() { + let value = record.get("count").expect("Failed to read count"); + assert_eq!(idx, *value); + } + + Ok(()) +} + +pub async fn should_xreadgroup_multiple_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = create_fake_group_and_stream(&client, "bar{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + let _ = add_stream_entries(&client, "bar{1}", 1).await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("bar{1}", "group1", "consumer1").await?; + + let result: XReadResponse = client + .xreadgroup_map( + "group1", + "consumer1", + None, + None, + false, + vec!["foo{1}", "bar{1}"], + vec![">", ">"], + ) + .await?; + + assert_eq!(result.len(), 2); + for (idx, (_, record)) in result.get("foo{1}").unwrap().into_iter().enumerate() { + let value = record.get("count").expect("Failed to read count"); + assert_eq!(idx, *value); + } + let bar_records = result.get("bar{1}").unwrap(); + 
assert_eq!(bar_records.len(), 1); + assert_eq!(*bar_records[0].1.get("count").unwrap(), 0); + + Ok(()) +} + +pub async fn should_xreadgroup_block(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + + let add_client = client.clone_new(); + tokio::spawn(async move { + let _ = add_client.connect(None); + let _ = add_client.wait_for_connect().await?; + sleep(Duration::from_secs(1)).await; + + let _: () = add_client.xadd("foo{1}", false, None, "*", ("count", 100)).await?; + let _ = add_client.quit().await?; + Ok::<_, RedisError>(()) + }); + + let mut result: XReadResponse = client + .xreadgroup_map("group1", "consumer1", None, Some(10_000), false, "foo{1}", ">") + .await?; + + assert_eq!(result.len(), 1); + let records = result.remove("foo{1}").unwrap(); + assert_eq!(records.len(), 1); + let count = records[0].1.get("count").unwrap(); + assert_eq!(*count, 100); + + Ok(()) +} + +pub async fn should_xack_one_id(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 1).await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + + let result: XReadResponse = client + .xreadgroup_map("group1", "consumer1", None, None, false, "foo{1}", ">") + .await?; + assert_eq!(result.len(), 1); + let records = result.get("foo{1}").unwrap(); + let id = records[0].0.clone(); + + let result: i64 = client.xack("foo{1}", "group1", id).await?; + assert_eq!(result, 1); + Ok(()) +} + +pub async fn should_xack_multiple_ids(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", 
"consumer1").await?; + + let result: XReadResponse = client + .xreadgroup_map("group1", "consumer1", None, None, false, "foo{1}", ">") + .await?; + assert_eq!(result.len(), 1); + let records = result.get("foo{1}").unwrap(); + let ids: Vec = records.iter().map(|(id, _)| id.clone()).collect(); + + let result: i64 = client.xack("foo{1}", "group1", ids).await?; + assert_eq!(result, 3); + Ok(()) +} + +pub async fn should_xclaim_one_id(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; + + let mut result: XReadResponse = client + .xreadgroup_map("group1", "consumer1", Some(1), None, false, "foo{1}", ">") + .await?; + assert_eq!(result.len(), 1); + assert_eq!(result.get("foo{1}").unwrap().len(), 1); + let first_read_id = result.get_mut("foo{1}").unwrap().pop().unwrap().0; + sleep(Duration::from_secs(1)).await; + + let (total_count, min_id, max_id, consumers): (u64, String, String, Vec<(String, u64)>) = + client.xpending("foo{1}", "group1", ()).await?; + assert_eq!(total_count, 1); + assert_eq!(min_id, first_read_id); + assert_eq!(max_id, first_read_id); + assert_eq!(consumers[0], ("consumer1".into(), 1)); + + let mut result: Vec<(String, HashMap)> = client + .xclaim_values( + "foo{1}", + "group1", + "consumer2", + 1000, + &first_read_id, + None, + None, + None, + false, + false, + ) + .await?; + + assert_eq!(result.len(), 1); + assert_eq!(result[0].0.as_str(), first_read_id); + let value = result[0].1.remove("count").unwrap(); + assert_eq!(value, 0); + + let acked: i64 = client.xack("foo{1}", "group1", first_read_id).await?; + assert_eq!(acked, 1); + Ok(()) +} + +pub async fn should_xclaim_multiple_ids(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + 
let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; + + let mut result: XReadResponse = client + .xreadgroup_map("group1", "consumer1", Some(2), None, false, "foo{1}", ">") + .await?; + assert_eq!(result.len(), 1); + assert_eq!(result.get("foo{1}").unwrap().len(), 2); + let second_read_id = result.get_mut("foo{1}").unwrap().pop().unwrap().0; + let first_read_id = result.get_mut("foo{1}").unwrap().pop().unwrap().0; + sleep(Duration::from_secs(1)).await; + + let (total_count, min_id, max_id, consumers): (u64, String, String, Vec<(String, u64)>) = + client.xpending("foo{1}", "group1", ()).await?; + assert_eq!(total_count, 2); + assert_eq!(min_id, first_read_id); + assert_eq!(max_id, second_read_id); + assert_eq!(consumers[0], ("consumer1".into(), 2)); + + let mut result: Vec<(String, HashMap)> = client + .xclaim_values( + "foo{1}", + "group1", + "consumer2", + 1000, + vec![&first_read_id, &second_read_id], + None, + None, + None, + false, + false, + ) + .await?; + + assert_eq!(result.len(), 2); + assert_eq!(result[0].0.as_str(), first_read_id); + assert_eq!(result[1].0.as_str(), second_read_id); + let first_value = result[0].1.remove("count").unwrap(); + let second_value = result[1].1.remove("count").unwrap(); + assert_eq!(first_value, 0); + assert_eq!(second_value, 1); + + let acked: i64 = client + .xack("foo{1}", "group1", vec![first_read_id, second_read_id]) + .await?; + assert_eq!(acked, 2); + Ok(()) +} + +pub async fn should_xclaim_with_justid(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = 
client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; + + let mut result: XReadResponse = client + .xreadgroup_map("group1", "consumer1", Some(2), None, false, "foo{1}", ">") + .await?; + assert_eq!(result.len(), 1); + assert_eq!(result.get("foo{1}").unwrap().len(), 2); + let second_read_id = result.get_mut("foo{1}").unwrap().pop().unwrap().0; + let first_read_id = result.get_mut("foo{1}").unwrap().pop().unwrap().0; + sleep(Duration::from_secs(1)).await; + + let (total_count, min_id, max_id, consumers): (u64, String, String, Vec<(String, u64)>) = + client.xpending("foo{1}", "group1", ()).await?; + assert_eq!(total_count, 2); + assert_eq!(min_id, first_read_id); + assert_eq!(max_id, second_read_id); + assert_eq!(consumers[0], ("consumer1".into(), 2)); + + let result: Vec = client + .xclaim( + "foo{1}", + "group1", + "consumer2", + 1000, + vec![&first_read_id, &second_read_id], + None, + None, + None, + false, + true, + ) + .await?; + assert_eq!(result, vec![first_read_id.clone(), second_read_id.clone()]); + + let acked: i64 = client + .xack("foo{1}", "group1", vec![first_read_id, second_read_id]) + .await?; + assert_eq!(acked, 2); + Ok(()) +} + +pub async fn should_xautoclaim_default(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let _ = create_fake_group_and_stream(&client, "foo{1}").await?; + let _ = add_stream_entries(&client, "foo{1}", 3).await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer1").await?; + let _: () = client.xgroup_createconsumer("foo{1}", "group1", "consumer2").await?; + + let mut result: XReadResponse = client + .xreadgroup_map("group1", "consumer1", Some(2), None, false, "foo{1}", ">") + .await?; + assert_eq!(result.len(), 1); + assert_eq!(result.get("foo{1}").unwrap().len(), 2); + let second_read_id = result.get_mut("foo{1}").unwrap().pop().unwrap().0; + let first_read_id = result.get_mut("foo{1}").unwrap().pop().unwrap().0; + sleep(Duration::from_secs(1)).await; + + let 
(total_count, min_id, max_id, consumers): (u64, String, String, Vec<(String, u64)>) = + client.xpending("foo{1}", "group1", ()).await?; + assert_eq!(total_count, 2); + assert_eq!(min_id, first_read_id); + assert_eq!(max_id, second_read_id); + assert_eq!(consumers[0], ("consumer1".into(), 2)); + + let (cursor, values): (String, Vec>) = client + .xautoclaim_values("foo{1}", "group1", "consumer2", 1000, "0-0", None, false) + .await?; + + assert_eq!(cursor, "0-0"); + assert_eq!(values.len(), 2); + + let mut first_expected: HashMap = HashMap::new(); + first_expected.insert("count".into(), 0); + let mut second_expected: HashMap = HashMap::new(); + second_expected.insert("count".into(), 1); + assert_eq!(values[0], (first_read_id, first_expected)); + assert_eq!(values[1], (second_read_id, second_expected)); + + Ok(()) +} diff --git a/tests/integration/utils.rs b/tests/integration/utils.rs index ab204437..c950f70f 100644 --- a/tests/integration/utils.rs +++ b/tests/integration/utils.rs @@ -1,10 +1,12 @@ #![allow(unused_macros)] use crate::chaos_monkey::set_test_kind; -use fred::client::RedisClient; +use fred::clients::RedisClient; use fred::error::RedisError; -use fred::globals; -use fred::types::{ReconnectPolicy, RedisConfig, ServerConfig}; +use fred::interfaces::*; +use fred::types::{PerformanceConfig, ReconnectPolicy, RedisConfig, ServerConfig}; +use redis_protocol::resp3::prelude::RespVersion; +use std::default::Default; use std::env; use std::future::Future; @@ -30,6 +32,24 @@ fn read_fail_fast_env() -> bool { } } +fn read_redis_centralized_host() -> (String, u16) { + let host = read_env_var("FRED_REDIS_CENTRALIZED_HOST").unwrap_or("127.0.0.1".into()); + let port = read_env_var("FRED_REDIS_CENTRALIZED_PORT") + .and_then(|s| s.parse::().ok()) + .unwrap_or(6379); + + (host, port) +} + +fn read_redis_cluster_host() -> (String, u16) { + let host = read_env_var("FRED_REDIS_CLUSTER_HOST").unwrap_or("127.0.0.1".into()); + let port = read_env_var("FRED_REDIS_CLUSTER_PORT") + 
.and_then(|s| s.parse::().ok()) + .unwrap_or(30001); + + (host, port) +} + #[cfg(feature = "sentinel-auth")] fn read_redis_password() -> String { read_env_var("REDIS_PASSWORD").expect("Failed to read REDIS_PASSWORD env") @@ -44,7 +64,7 @@ fn read_sentinel_password() -> String { fn read_sentinel_hostname() -> String { if read_env_var("CIRCLECI_TESTS").is_some() { "redis-sentinel-1".to_owned() - }else{ + } else { "127.0.0.1".to_owned() } } @@ -71,7 +91,11 @@ where username: None, password: Some(read_sentinel_password()), }, - pipeline, + performance: PerformanceConfig { + pipeline, + default_command_timeout_ms: 10_000, + ..Default::default() + }, password: Some(read_redis_password()), ..Default::default() }; @@ -86,19 +110,26 @@ where let _ = client.quit().await; } -pub async fn run_cluster(func: F, pipeline: bool) +pub async fn run_cluster(func: F, pipeline: bool, resp3: bool) where F: Fn(RedisClient, RedisConfig) -> Fut, Fut: Future>, { set_test_kind(true); - globals::set_default_command_timeout(10_000); let policy = ReconnectPolicy::new_constant(300, RECONNECT_DELAY); + let (host, port) = read_redis_cluster_host(); let config = RedisConfig { fail_fast: read_fail_fast_env(), - server: ServerConfig::default_clustered(), - pipeline, + server: ServerConfig::Clustered { + hosts: vec![(host, port)], + }, + version: if resp3 { RespVersion::RESP3 } else { RespVersion::RESP2 }, + performance: PerformanceConfig { + pipeline, + default_command_timeout_ms: 10_000, + ..Default::default() + }, ..Default::default() }; let client = RedisClient::new(config.clone()); @@ -112,19 +143,24 @@ where let _ = client.quit().await; } -pub async fn run_centralized(func: F, pipeline: bool) +pub async fn run_centralized(func: F, pipeline: bool, resp3: bool) where F: Fn(RedisClient, RedisConfig) -> Fut, Fut: Future>, { set_test_kind(false); - globals::set_default_command_timeout(10_000); let policy = ReconnectPolicy::new_constant(300, RECONNECT_DELAY); + let (host, port) = 
read_redis_centralized_host(); let config = RedisConfig { fail_fast: read_fail_fast_env(), - server: ServerConfig::default_centralized(), - pipeline, + server: ServerConfig::Centralized { host, port }, + version: if resp3 { RespVersion::RESP3 } else { RespVersion::RESP2 }, + performance: PerformanceConfig { + pipeline, + default_command_timeout_ms: 10_000, + ..Default::default() + }, ..Default::default() }; let client = RedisClient::new(config.clone()); @@ -142,18 +178,36 @@ macro_rules! centralized_test_panic( ($module:tt, $name:tt) => { #[cfg(not(feature="sentinel-tests"))] mod $name { - #[tokio::test] - #[should_panic] - async fn pipelined() { - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_centralized(crate::integration::$module::$name, true).await; + mod resp2 { + #[tokio::test] + #[should_panic] + async fn pipelined() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_centralized(crate::integration::$module::$name, true, false).await; + } + + #[tokio::test] + #[should_panic] + async fn no_pipeline() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_centralized(crate::integration::$module::$name, false, false).await; + } } - #[tokio::test] - #[should_panic] - async fn no_pipeline() { - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_centralized(crate::integration::$module::$name, false).await; + mod resp3 { + #[tokio::test] + #[should_panic] + async fn pipelined() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_centralized(crate::integration::$module::$name, true, true).await; + } + + #[tokio::test] + #[should_panic] + async fn no_pipeline() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_centralized(crate::integration::$module::$name, false, true).await; + } } } @@ -180,18 +234,36 @@ macro_rules! 
cluster_test_panic( ($module:tt, $name:tt) => { #[cfg(not(feature="sentinel-tests"))] mod $name { - #[tokio::test] - #[should_panic] - async fn pipelined() { - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_cluster(crate::integration::$module::$name, true).await; + mod resp2 { + #[tokio::test] + #[should_panic] + async fn pipelined() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_cluster(crate::integration::$module::$name, true, false).await; + } + + #[tokio::test] + #[should_panic] + async fn no_pipeline() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_cluster(crate::integration::$module::$name, false, false).await; + } } - #[tokio::test] - #[should_panic] - async fn no_pipeline() { - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_cluster(crate::integration::$module::$name, false).await; + mod resp3 { + #[tokio::test] + #[should_panic] + async fn pipelined() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_cluster(crate::integration::$module::$name, true, true).await; + } + + #[tokio::test] + #[should_panic] + async fn no_pipeline() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_cluster(crate::integration::$module::$name, false, true).await; + } } } } @@ -201,16 +273,32 @@ macro_rules! 
centralized_test( ($module:tt, $name:tt) => { #[cfg(not(feature="sentinel-tests"))] mod $name { - #[tokio::test] - async fn pipelined() { - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_centralized(crate::integration::$module::$name, true).await; + mod resp2 { + #[tokio::test] + async fn pipelined() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_centralized(crate::integration::$module::$name, true, false).await; + } + + #[tokio::test] + async fn no_pipeline() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_centralized(crate::integration::$module::$name, false, false).await; + } } - #[tokio::test] - async fn no_pipeline() { - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_centralized(crate::integration::$module::$name, false).await; + mod resp3 { + #[tokio::test] + async fn pipelined() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_centralized(crate::integration::$module::$name, true, true).await; + } + + #[tokio::test] + async fn no_pipeline() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_centralized(crate::integration::$module::$name, false, true).await; + } } } @@ -235,16 +323,32 @@ macro_rules! 
cluster_test( ($module:tt, $name:tt) => { #[cfg(not(feature="sentinel-tests"))] mod $name { - #[tokio::test] - async fn pipelined() { - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_cluster(crate::integration::$module::$name, true).await; + mod resp2 { + #[tokio::test] + async fn pipelined() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_cluster(crate::integration::$module::$name, true, false).await; + } + + #[tokio::test] + async fn no_pipeline() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_cluster(crate::integration::$module::$name, false, false).await; + } } - #[tokio::test] - async fn no_pipeline() { - let _ = pretty_env_logger::try_init(); - crate::integration::utils::run_cluster(crate::integration::$module::$name, false).await; + mod resp3 { + #[tokio::test] + async fn pipelined() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_cluster(crate::integration::$module::$name, true, true).await; + } + + #[tokio::test] + async fn no_pipeline() { + let _ = pretty_env_logger::try_init(); + crate::integration::utils::run_cluster(crate::integration::$module::$name, false, true).await; + } } } } diff --git a/tests/lib.rs b/tests/lib.rs index dcd9a515..99e05d5f 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -1,6 +1,11 @@ +#![allow(unused_imports)] + #[macro_use] extern crate log; extern crate pretty_env_logger; +#[macro_use] +extern crate maplit; + mod chaos_monkey; mod integration; diff --git a/tests/minikube.md b/tests/minikube.md new file mode 100644 index 00000000..c757c142 --- /dev/null +++ b/tests/minikube.md @@ -0,0 +1,119 @@ +Minikube +======== + +Running Redis inside minikube can be difficult. This document will cover some of the issues associated with the following common use case: + +* You're running a Redis **cluster** or Redis sentinel inside minikube. If you're just using a centralized server nothing in this document will likely apply to you. 
+* You're running your application outside of minikube, but it needs to connect to Redis inside minikube. This is often how people use minikube while actively developing their app layer. +* You're using either the virtualbox driver or the docker driver without host networking for minikube. In other words, minikube is running its own virtual network. +* You have limited access to the minikube network from outside the minikube cluster. This may come in the form of a load balancer or reverse proxy where your app layer connects to this proxy instead of anything directly inside the minikube cluster. + +Before getting too far into this it's necessary to understand how Redis cluster and Redis sentinel work, and why they make this difficult. + +### Redis Cluster + +The Redis cluster features work by sharding data across multiple Redis servers. + +Keys are assigned a hash slot where each hash slot is a 16-bit integer that comes from a CRC16 of the key or key [hash tag](https://redis.io/topics/cluster-spec#keys-hash-tags). Each primary/main node in the cluster is responsible for a range of hash slots. Any node can cover multiple unique hash slot ranges, but all possible hash slots in the range `0-16383` must be covered. A hash slot can only belong to one node in the cluster. + +The key hashing logic is implemented [here](https://docs.rs/redis-protocol/latest/redis_protocol/fn.redis_keyslot.html). + +The state of the cluster hash slot assignments can be seen via the `CLUSTER NODES` command. Here is a sample output of `CLUSTER NODES` from a local cluster started via the `create-cluster` script that comes with Redis. 
+ +``` +07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected +67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002 master - 0 1426238316232 2 connected 5461-10922 +292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003 master - 0 1426238318243 3 connected 10923-16383 +6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected +824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected +e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001 myself,master - 0 0 1 connected 0-5460 +``` + +In this example there are 3 primary nodes and 3 replicas. Each primary has one replica. The 16384 hash slots are spread evenly across the 3 primary nodes. + +* `127.0.0.1:30001` covers the hash slot range 0-5460 +* `127.0.0.1:30002` covers the hash slot range 5461-10922 +* `127.0.0.1:30003` covers the hash slot range 10923-16383 + +In order for any Redis client to support the full command set with any possible key it needs to connect to 3 nodes. There are several ways this can be done, but generally speaking most clients will maintain at least one connection to each primary node in the cluster. + +There are a couple other things to note about Redis cluster usage: + +* Cluster state can change. Replicas can be promoted to primary nodes, and vice versa when servers shut down. +* Hash slots can move between nodes. +* Nodes can be added or removed from the cluster, which often comes with a hash slot rebalancing operation. + +This doc will not cover how this happens because it's often not necessary to know that for minikube usage. However there are a few important things to remember with Redis clusters and minikube: + +* Clients often need to be aware of the state of all cluster nodes in order to know which nodes need a connection. 
Generally speaking clients connect to each primary node. +* The `CLUSTER NODES` response contains IP addresses for all of the nodes. +* The client has to use the `CLUSTER NODES` response to initiate connections to other nodes. + +The general idea is that at any time a client can send the `CLUSTER NODES` command to _any_ node, and that node will return the state of the entire cluster. Clients then use that response to initiate connections to all the other nodes. + +This is how fred works. When initiating a connection to a cluster it does the following: + +1. Read the client's `ServerConfig::Cluster` to find the list of known hosts/ports provided by the caller. +2. Try to connect to each of them. If none can accept a connection then return an error. +3. Once a connection is established run the `CLUSTER NODES` command. +4. Parse the `CLUSTER NODES` response to read the IP address and port of each node in the cluster. +5. Initiate connections to each of the main/primary nodes in the cluster. +6. Start accepting commands from callers. + +### Redis Sentinel + +When using a Redis server behind a Sentinel layer the process is somewhat similar to a cluster. Redis Sentinel works by adding a management layer in front of the Redis server that tracks the health of the Redis server. If the Redis server dies unexpectedly the sentinel can fail over to a replica. Clients connect to the sentinel first in order to learn about how they should connect to the backing Redis server. Typically these sentinel nodes are deployed on different machines or containers. + +When connecting to a sentinel node the client does the following: + +1. Read the client's `ServerConfig::Sentinel` to find the list of known hosts/ports for the sentinel nodes provided by the caller. +2. Try to connect to each of the sentinel nodes. If none can accept connections then return an error. +3. Run the `SENTINEL get-master-addr-by-name` command on the sentinel node. +4. 
Parse the response to find the IP address / port for the main Redis server. The sentinel will change this if it needs to fail over to a replica, etc. +5. Connect to the IP address / port from the previous step. Return an error if a connection cannot be established. +6. Start accepting commands from callers. + +### Minikube Considerations + +**The key thing to take away from the previous sections is that for both Redis cluster and Redis Sentinel the servers will redirect clients by returning IP addresses and ports for other server instances.** + +This implementation decision by Redis assumes a few things about your client setup: + +* The client has access to the same network as the server. An IP address X.X.X.X maps to the same thing from the client's perspective as it does from the server's perspective. +* The client's DNS settings are similar to the server's DNS settings. This really only matters with TLS, which is not covered in this doc. + +When using minikube neither of the above assumptions are true due to how minikube runs its own private network. + +You may connect to minikube via a 10.0.0.0/24 address, but inside minikube everything uses a 172.168.0.0/16 address, or something other than the 10/24 range used outside the minikube cluster. + +This is problematic for a few reasons. Consider the Redis Cluster example: + +1. You provide the address `10.0.0.1` to fred as the IP address for one of the cluster nodes. Maybe the other nodes are visible outside minikube, maybe not. It doesn't really matter though in this example. +2. Fred connects to `10.0.0.1` successfully and runs `CLUSTER NODES`. +3. When parsing the response it sees `172.168.0.0/16` addresses instead of `10.0.0.0/24` addresses, since that's the IP range used inside minikube. +4. Fred tries to connect to one of those IP addresses, let's say `172.168.0.1`, but it fails with a networking error (typically `HOSTUNREACHABLE`). 
+ +This happens because the network outside minikube has no idea how to access the IP addresses inside minikube. The minikube network is private and might as well be in another data center. Your host machine has no idea how to send traffic to anything in that `172.168.0.0/16` range directly without more information. + +This is why many times people run reverse proxies (such as Caddy) with minikube. One of the goals of services like Caddy is to handle this exact situation. + +Folks typically discover this since their HTTP app can't be accessed inside minikube. Then they find out about Caddy, and they look for something similar to use for Redis (or other data layer services). + +However, Redis is uniquely ill-suited for this kind of usage. With HTTP the story is pretty simple. The client and reverse proxy can rely on DNS features to identify servers and re-route traffic. With Redis the problem is more complicated because Redis returns IP addresses, not hostnames, so we cannot leverage DNS rules to re-route traffic. + +This is why Redis Cluster or Redis Sentinel + Minikube is complicated. + +### Solutions + +There are not currently many great ways to solve this. + +As you probably gathered, using a centralized Redis server with minikube avoids all of these problems. You only need to poke a small hole into the minikube network, and the client can simply connect to the one Redis server via that exposed address/port from the host. Since there's only one node the server will never redirect the client elsewhere. **This is almost always the easiest option.** + +If your main goal with using minikube is to facilitate easy local development this option often works fine. A common setup is to use a centralized server in minikube, but clusters in "real" environments like staging and production. Callers then just need to write code such that they can configure the client easily via argv, environment variables, or configuration files. 
Then all you need to do is use a different configuration in upper environments to switch to a cluster. + +However, if you need access to a cluster in order to use `CLUSTER *` commands, etc, then another option is to run minikube in a way where each container in the cluster is visible to the host machine. + +If you're using the docker driver you have a few other options as well. Docker networking is easier to manage than virtualbox networking. It is also possible to "dockerize" your local build or run script so that it runs via `docker` or `docker-compose`. If you do this you can supply the minikube docker network as an argument to your `docker run` command, which will run your container inside the same network as the Redis server. This is what Fred does in its CI configuration since CircleCI runs some of the Redis servers on a remote docker engine. + +Aside from working around the networking restrictions unfortunately I'm not sure of a great solution here. It's possible that there are services out there to deal with this issue, but I'm not aware of them. If you know of a better solution here please file an issue to update this document and I'd be happy to add it. 
+ diff --git a/tests/runners/all-features.sh b/tests/runners/all-features.sh index 724f821b..fd96bd80 100755 --- a/tests/runners/all-features.sh +++ b/tests/runners/all-features.sh @@ -11,8 +11,11 @@ do done # cant use all-features here or it'll run chaos monkey and then the tests will take forever -cargo test --release --lib --tests --features \ - "index-map network-logs pool-prefer-active enable-tls vendored-tls - custom-reconnect-errors ignore-auth-error blocking-encoding full-tracing - reconnect-on-auth-error monitor metrics sentinel-client" \ - -- --test-threads=1 "$@" \ No newline at end of file +FEATURES="network-logs pool-prefer-active enable-tls vendored-tls custom-reconnect-errors ignore-auth-error serde-json + blocking-encoding full-tracing reconnect-on-auth-error monitor metrics sentinel-client subscriber-client" + +if [ -z "$FRED_CI_NEXTEST" ]; then + cargo test --release --lib --tests --features "$FEATURES" -- --test-threads=1 "$@" +else + cargo nextest run --release --lib --tests --features "$FEATURES" --test-threads=1 "$@" +fi \ No newline at end of file diff --git a/tests/runners/default-features.sh b/tests/runners/default-features.sh index d7c00725..9cad8ae9 100755 --- a/tests/runners/default-features.sh +++ b/tests/runners/default-features.sh @@ -10,4 +10,8 @@ do fi done -cargo test --release --lib --tests -- --test-threads=1 "$@" \ No newline at end of file +if [ -z "$FRED_CI_NEXTEST" ]; then + cargo test --release --lib --tests -- --test-threads=1 "$@" +else + cargo nextest run --release --lib --tests --test-threads=1 "$@" +fi \ No newline at end of file diff --git a/tests/runners/no-features.sh b/tests/runners/no-features.sh index 21cb11ec..cca05cd1 100755 --- a/tests/runners/no-features.sh +++ b/tests/runners/no-features.sh @@ -10,4 +10,8 @@ do fi done -cargo test --release --lib --tests --no-default-features -- --test-threads=1 "$@" \ No newline at end of file +if [ -z "$FRED_CI_NEXTEST" ]; then + cargo test --release --lib --tests 
--no-default-features -- --test-threads=1 "$@" +else + cargo nextest run --release --lib --tests --no-default-features --test-threads=1 "$@" +fi \ No newline at end of file diff --git a/tests/runners/sentinel-features.sh b/tests/runners/sentinel-features.sh index 2b1744c3..d1255585 100755 --- a/tests/runners/sentinel-features.sh +++ b/tests/runners/sentinel-features.sh @@ -10,4 +10,10 @@ do fi done -cargo test --release --features "network-logs sentinel-tests sentinel-auth" --lib --tests -- --test-threads=1 "$@" \ No newline at end of file +FEATURES="network-logs sentinel-tests sentinel-auth" + +if [ -z "$FRED_CI_NEXTEST" ]; then + cargo test --release --lib --tests --features "$FEATURES" -- --test-threads=1 "$@" +else + cargo nextest run --release --lib --tests --features "$FEATURES" --test-threads=1 "$@" +fi \ No newline at end of file diff --git a/tests/scripts/install_cargo_nextest.sh b/tests/scripts/install_cargo_nextest.sh new file mode 100755 index 00000000..d2b12235 --- /dev/null +++ b/tests/scripts/install_cargo_nextest.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [ -z "$FRED_CI_NEXTEST" ]; then + echo "Skip installing cargo-nextest" +else + cargo install cargo-nextest +fi \ No newline at end of file